From 71077b07f380fac47b23b42b26894fef11963319 Mon Sep 17 00:00:00 2001
From: Franziskus Kiefer
Date: Wed, 24 Apr 2024 11:33:15 +0200
Subject: [PATCH 01/10] wip: broken

---
 config/config.json | 4 +
 include/Hacl_Ed25519.h | 33 +-
 include/Hacl_Hash_Blake2b.h | 122 +-
 include/Hacl_Hash_Blake2b_Simd256.h | 69 +-
 include/Hacl_Hash_Blake2s.h | 65 +-
 include/Hacl_Hash_Blake2s_Simd128.h | 71 +-
 include/Hacl_Hash_SHA3.h | 95 +-
 include/Hacl_Hash_SHA3_Simd256.h | 233 +
 include/Hacl_SHA2_Types.h | 51 +
 include/Hacl_SHA2_Vec128.h | 2 +
 include/Hacl_SHA2_Vec256.h | 1 +
 include/internal/Hacl_Frodo_KEM.h | 8 +-
 include/internal/Hacl_Hash_Blake2b.h | 14 +
 include/internal/Hacl_Hash_Blake2b_Simd256.h | 1 +
 include/internal/Hacl_Hash_Blake2s.h | 1 +
 include/internal/Hacl_Hash_Blake2s_Simd128.h | 1 +
 include/internal/Hacl_Hash_SHA3.h | 10 +-
 include/internal/Hacl_Impl_Blake2_Constants.h | 6 +-
 include/internal/Hacl_SHA2_Types.h | 27 +-
 include/lib_memzero0.h | 2 +-
 include/msvc/Hacl_Ed25519.h | 33 +-
 include/msvc/Hacl_Hash_SHA3.h | 93 +-
 include/msvc/Hacl_Hash_SHA3_Simd256.h | 226 +
 include/msvc/Hacl_SHA2_Types.h | 51 +
 include/msvc/Hacl_SHA2_Vec128.h | 2 +
 include/msvc/Hacl_SHA2_Vec256.h | 1 +
 include/msvc/internal/Hacl_Frodo_KEM.h | 8 +-
 include/msvc/internal/Hacl_Hash_Blake2b.h | 15 +
 .../msvc/internal/Hacl_Hash_Blake2b_Simd256.h | 1 +
 include/msvc/internal/Hacl_Hash_Blake2s.h | 1 +
 .../msvc/internal/Hacl_Hash_Blake2s_Simd128.h | 1 +
 include/msvc/internal/Hacl_Hash_SHA3.h | 10 +-
 include/msvc/internal/Hacl_SHA2_Types.h | 27 +-
 include/msvc/lib_memzero0.h | 2 +-
 ocaml/ctypes.depend | 38 +-
 .../lib/Hacl_Hash_Blake2b_Simd256_bindings.ml | 13 +
 ocaml/lib/Hacl_Hash_Blake2b_bindings.ml | 78 +-
 .../lib/Hacl_Hash_Blake2s_Simd128_bindings.ml | 13 +
 ocaml/lib/Hacl_Hash_Blake2s_bindings.ml | 46 +-
 ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml | 101 +
 ocaml/lib/Hacl_Hash_SHA3_bindings.ml | 53 +-
 ocaml/lib/Hacl_SHA2_Types_bindings.ml | 50 +-
 ocaml/lib_gen/Hacl_Hash_SHA3_Simd256_gen.ml | 10 +
 ocaml/lib_gen/Hacl_SHA2_Types_gen.ml | 3 +-
 src/EverCrypt_DRBG.c | 16 +-
 src/EverCrypt_Hash.c | 40 +-
 src/Hacl_Ed25519.c | 33 +-
 src/Hacl_Frodo1344.c | 68 +-
 src/Hacl_Frodo64.c | 68 +-
 src/Hacl_Frodo640.c | 68 +-
 src/Hacl_Frodo976.c | 68 +-
 src/Hacl_Hash_Blake2b.c | 602 +-
 src/Hacl_Hash_Blake2b_Simd256.c | 557 +-
 src/Hacl_Hash_Blake2s.c | 562 +-
 src/Hacl_Hash_Blake2s_Simd128.c | 550 +-
 src/Hacl_Hash_SHA3.c | 1988 ++-
 src/Hacl_Hash_SHA3_Simd256.c | 6733 +++++++++
 src/Hacl_K256_ECDSA.c | 4 -
 src/Hacl_SHA2_Vec128.c | 60 +-
 src/Hacl_SHA2_Vec256.c | 60 +-
 src/Lib_RandomBuffer_System.c | 1 +
 src/msvc/EverCrypt_DRBG.c | 16 +-
 src/msvc/EverCrypt_Hash.c | 40 +-
 src/msvc/Hacl_Ed25519.c | 33 +-
 src/msvc/Hacl_Frodo1344.c | 68 +-
 src/msvc/Hacl_Frodo64.c | 68 +-
 src/msvc/Hacl_Frodo640.c | 68 +-
 src/msvc/Hacl_Frodo976.c | 68 +-
 src/msvc/Hacl_Hash_Blake2b.c | 80 +-
 src/msvc/Hacl_Hash_Blake2b_Simd256.c | 71 +-
 src/msvc/Hacl_Hash_Blake2s.c | 78 +-
 src/msvc/Hacl_Hash_Blake2s_Simd128.c | 68 +-
 src/msvc/Hacl_Hash_SHA3.c | 3793 ++++-
 src/msvc/Hacl_Hash_SHA3_Simd256.c | 11396 ++++++++++++++++
 src/msvc/Hacl_K256_ECDSA.c | 4 -
 src/msvc/Hacl_SHA2_Vec128.c | 60 +-
 src/msvc/Hacl_SHA2_Vec256.c | 60 +-
 src/msvc/Lib_RandomBuffer_System.c | 1 +
 src/wasm/EverCrypt_Hash.wasm | Bin 49305 -> 48469 bytes
 .../Hacl_AEAD_Chacha20Poly1305_Simd256.wasm | Bin 1910 -> 1910 bytes
 src/wasm/Hacl_Ed25519_PrecompTable.wasm | Bin 16451 -> 16451 bytes
 src/wasm/Hacl_HMAC.wasm | Bin 29754 -> 28160 bytes
 src/wasm/Hacl_HMAC_DRBG.wasm | Bin 25396 -> 25396 bytes
 src/wasm/Hacl_Hash_Blake2b.wasm | Bin 15858 -> 16141 bytes
 src/wasm/Hacl_Hash_Blake2b_Simd256.wasm | Bin 6794 -> 7187 bytes
 src/wasm/Hacl_Hash_Blake2s.wasm | Bin 14005 -> 14331 bytes
 src/wasm/Hacl_Hash_Blake2s_Simd128.wasm | Bin 5638 -> 6030 bytes
 src/wasm/Hacl_SHA2_Vec128.wasm | Bin 5687 -> 5687 bytes
 src/wasm/INFO.txt | 4 +-
 src/wasm/layouts.json | 2 +-
 90 files changed, 27795 insertions(+), 1353 deletions(-)
 create mode 100644 include/Hacl_Hash_SHA3_Simd256.h
 create mode 100644 include/Hacl_SHA2_Types.h
 create mode 100644 include/msvc/Hacl_Hash_SHA3_Simd256.h
 create mode 100644 include/msvc/Hacl_SHA2_Types.h
 create mode 100644 ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml
 create mode 100644 ocaml/lib_gen/Hacl_Hash_SHA3_Simd256_gen.ml
 create mode 100644 src/Hacl_Hash_SHA3_Simd256.c
 create mode 100644 src/msvc/Hacl_Hash_SHA3_Simd256.c

diff --git a/config/config.json b/config/config.json
index 7dd4cae3..c7d9247c 100644
--- a/config/config.json
+++ b/config/config.json
@@ -168,6 +168,10 @@
     {
       "file": "Hacl_Hash_SHA3.c",
       "features": "std"
+    },
+    {
+      "file": "Hacl_Hash_SHA3_Simd256.c",
+      "features": "vec256"
     }
   ],
   "sha2": [
diff --git a/include/Hacl_Ed25519.h b/include/Hacl_Ed25519.h
index b2654704..f0dc31e2 100644
--- a/include/Hacl_Ed25519.h
+++ b/include/Hacl_Ed25519.h
@@ -47,16 +47,16 @@ extern "C" {
 /**
 Compute the public key from the private key.
 
-  The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
-  The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
+  @param[out] public_key Points to 32 bytes of valid memory, i.e., `uint8_t[32]`. Must not overlap the memory location of `private_key`.
+  @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.
 */
 void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key);
 
 /**
 Compute the expanded keys for an Ed25519 signature.
 
-  The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96].
-  The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
+  @param[out] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`. Must not overlap the memory location of `private_key`.
+  @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.
 
   If one needs to sign several messages under the same private key, it is more efficient
   to call `expand_keys` only once and `sign_expanded` multiple times, for each message.
@@ -66,11 +66,10 @@ void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key);
 /**
 Create an Ed25519 signature with the (precomputed) expanded keys.
 
-  The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].
-  The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96].
-  The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].
-
-  The argument `expanded_keys` is obtained through `expand_keys`.
+  @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `expanded_keys` nor `msg`.
+  @param[in] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`, containing the expanded keys obtained by invoking `expand_keys`.
+  @param[in] msg_len Length of `msg`.
+  @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.
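+
+  As an illustration only (not part of the generated header; `private_key`,
+  `msg` and `msg_len` stand for caller-provided data), the two-step flow
+  described here can be sketched as:
+
+    uint8_t expanded_keys[96U];
+    uint8_t signature[64U];
+    uint8_t public_key[32U];
+    Hacl_Ed25519_expand_keys(expanded_keys, private_key);
+    Hacl_Ed25519_sign_expanded(signature, expanded_keys, msg_len, msg);
+    Hacl_Ed25519_secret_to_public(public_key, private_key);
+    bool ok = Hacl_Ed25519_verify(public_key, msg_len, msg, signature);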
 
   If one needs to sign several messages under the same private key, it is more efficient
   to call `expand_keys` only once and `sign_expanded` multiple times, for each message.
@@ -86,9 +85,10 @@ Hacl_Ed25519_sign_expanded(
 /**
 Create an Ed25519 signature.
 
-  The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].
-  The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
-  The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].
+  @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `private_key` nor `msg`.
+  @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.
+  @param[in] msg_len Length of `msg`.
+  @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.
 
   The function first calls `expand_keys` and then invokes `sign_expanded`.
@@ -101,11 +101,12 @@ Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, ui
 /**
 Verify an Ed25519 signature.
 
-  The function returns `true` if the signature is valid and `false` otherwise.
+  @param public_key Points to 32 bytes of valid memory containing the public key, i.e., `uint8_t[32]`.
+  @param msg_len Length of `msg`.
+  @param msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.
+  @param signature Points to 64 bytes of valid memory containing the signature, i.e., `uint8_t[64]`.
 
-  The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
-  The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].
-  The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].
+  @return Returns `true` if the signature is valid and `false` otherwise.
 */
 bool Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature);
diff --git a/include/Hacl_Hash_Blake2b.h b/include/Hacl_Hash_Blake2b.h
index 414574f9..3403fc83 100644
--- a/include/Hacl_Hash_Blake2b.h
+++ b/include/Hacl_Hash_Blake2b.h
@@ -38,11 +38,34 @@ extern "C" {
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
 
-typedef struct Hacl_Hash_Blake2b_block_state_t_s
+typedef struct Hacl_Hash_Blake2b_blake2_params_s
+{
+  uint8_t digest_length;
+  uint8_t key_length;
+  uint8_t fanout;
+  uint8_t depth;
+  uint32_t leaf_length;
+  uint64_t node_offset;
+  uint8_t node_depth;
+  uint8_t inner_length;
+  uint8_t *salt;
+  uint8_t *personal;
+}
+Hacl_Hash_Blake2b_blake2_params;
+
+typedef struct K____uint64_t___uint64_t__s
 {
   uint64_t *fst;
   uint64_t *snd;
 }
+K____uint64_t___uint64_t_;
+
+typedef struct Hacl_Hash_Blake2b_block_state_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  K____uint64_t___uint64_t_ thd;
+}
 Hacl_Hash_Blake2b_block_state_t;
 
 typedef struct Hacl_Hash_Blake2b_state_t_s
@@ -54,23 +77,90 @@ Hacl_Hash_Blake2b_state_t;
 
 /**
-  State allocation function when there is no key
+  General-purpose allocation function that gives control over all
+Blake2 parameters, including the key. Further resettings of the state SHALL be
+done with `reset_with_key_and_params`, and SHALL feature the exact same values
+for the `key_length` and `digest_length` fields as passed here. In other words,
+once you commit to a digest and key length, the only way to change these
+parameters is to allocate a new object.
+
+The caller must satisfy the following requirements.
+- The length of the key k MUST match the value of the field key_length in the
+  parameters.
+- The key_length must not exceed 32 for S, 64 for B.
+- The digest_length must not exceed 32 for S, 64 for B.
+
+*/
+Hacl_Hash_Blake2b_state_t
+*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k);
+
+/**
+  Specialized allocation function that picks default values for all
+parameters, except for the key_length. Further resettings of the state SHALL be
+done with `reset_with_key`, and SHALL feature the exact same key length `kk` as
+passed here. In other words, once you commit to a key length, the only way to
+change this parameter is to allocate a new object.
+
+The caller must satisfy the following requirements.
+- The key_length must not exceed 32 for S, 64 for B.
+
+*/
+Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t kk);
+
+/**
+  Specialized allocation function that picks default values for all
+parameters, and has no key. Effectively, this is what you want if you intend to
+use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`.
 */
 Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void);
 
 /**
-  Re-initialization function when there is no key
+  General-purpose re-initialization function with parameters and
+key. You cannot change digest_length or key_length, meaning those values in
+the parameters object must be the same as originally decided via one of the
+malloc functions. All other fields of the parameters can be changed. The behavior
+is unspecified if you violate this precondition.
+*/
+void
+Hacl_Hash_Blake2b_reset_with_key_and_params(
+  Hacl_Hash_Blake2b_state_t *s,
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+);
+
+/**
+  Special-purpose re-initialization function with no parameters,
+and a key. The key length must be the same as originally decided via your choice
+of malloc function. All other parameters are reset to their default values. The
+original call to malloc MUST have set digest_length to the default value. The
+behavior is unspecified if you violate this precondition.
 */
-void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state);
+void Hacl_Hash_Blake2b_reset_with_key(Hacl_Hash_Blake2b_state_t *s, uint8_t *k);
 
 /**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
+  Special-purpose re-initialization function with no parameters
+and no key. This is what you want if you intend to use Blake2 as a hash
+function. The key length and digest length must have been set to their
+respective default values via your choice of malloc function (always true if you
+used `malloc`). All other parameters are reset to their default values. The
+behavior is unspecified if you violate this precondition.
+*/
+void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *s);
+
+/**
+  Update function; 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
 Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 /**
-  Finish function when there is no key
+  Digest function. This function expects the `output` array to hold
+at least `digest_length` bytes, where `digest_length` was determined by your
+choice of `malloc` function. Concretely, if you used `malloc` or
+`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default
+digest length). If you used `malloc_with_params_and_key`, then the expected
+length is whatever you chose for the `digest_length` field of your
+parameters.
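+
+  A usage sketch (illustrative, not normative; the field values below are the
+  BLAKE2 defaults except for the 32-byte digest and key lengths, and
+  `data`/`data_len` stand for caller-provided input):
+
+    uint8_t salt[16U] = { 0U };
+    uint8_t personal[16U] = { 0U };
+    uint8_t key[32U] = { 0U };
+    uint8_t out[32U]; /* digest_length bytes */
+    Hacl_Hash_Blake2b_blake2_params params = {
+      .digest_length = 32U, .key_length = 32U, .fanout = 1U, .depth = 1U,
+      .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
+      .inner_length = 0U, .salt = salt, .personal = personal
+    };
+    Hacl_Hash_Blake2b_state_t
+    *st = Hacl_Hash_Blake2b_malloc_with_params_and_key(&params, key);
+    Hacl_Hash_Blake2b_update(st, data, data_len); /* returns 1 if max length exceeded */
+    Hacl_Hash_Blake2b_digest(st, out);
+    Hacl_Hash_Blake2b_free(st);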
 */
 void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output);
@@ -79,6 +169,11 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output)
 */
 void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state);
 
+/**
+  Copying. This preserves all parameters.
+*/
+Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_copy(Hacl_Hash_Blake2b_state_t *state);
+
 /**
 Write the BLAKE2b digest of message `input` using key `key` into `output`.
@@ -99,6 +194,21 @@ Hacl_Hash_Blake2b_hash_with_key(
   uint32_t key_len
 );
 
+/**
+Write the BLAKE2b digest of message `input` using key `key` and
+parameters `params` into `output`. The `key` array must be of length
+`params.key_length`. The `output` array must be of length
+`params.digest_length`.
+*/
+void
+Hacl_Hash_Blake2b_hash_with_key_and_paramas(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  Hacl_Hash_Blake2b_blake2_params params,
+  uint8_t *key
+);
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/Hacl_Hash_Blake2b_Simd256.h b/include/Hacl_Hash_Blake2b_Simd256.h
index adddce66..af309dc8 100644
--- a/include/Hacl_Hash_Blake2b_Simd256.h
+++ b/include/Hacl_Hash_Blake2b_Simd256.h
@@ -37,13 +37,22 @@ extern "C" {
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
+#include "Hacl_Hash_Blake2b.h"
 #include "libintvector.h"
 
-typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s
+typedef struct K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256__s
 {
   Lib_IntVector_Intrinsics_vec256 *fst;
   Lib_IntVector_Intrinsics_vec256 *snd;
 }
+K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_;
+
+typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ thd;
+}
 Hacl_Hash_Blake2b_Simd256_block_state_t;
 
 typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s
@@ -54,15 +63,56 @@ Hacl_Hash_Blake2b_Simd256_state_t;
 
+/**
+  State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2b_Simd256_state_t
+*Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+);
+
+/**
+  State allocation function when there is just a custom key. All
+other parameters are set to their respective default values, meaning the output
+length is the maximum allowed output (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2b_Simd256_state_t
+*Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk);
+
 /**
   State allocation function when there is no key
 */
 Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void);
 
+/**
+  Re-initialization function. The reinitialization API is tricky --
+you MUST reuse the same original parameters for digest (output) length and key
+length.
+*/
+void
+Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params(
+  Hacl_Hash_Blake2b_Simd256_state_t *s,
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+);
+
+/**
+  Re-initialization function when there is a key. Note that the key
+size is not allowed to change, which is why this function does not take a key
+length -- the key has to be the same size as the key that was originally
+passed to `malloc_with_key`.
+*/
+void
+Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k);
+
 /**
   Re-initialization function when there is no key
 */
-void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state);
+void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
@@ -85,6 +135,12 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8
 */
 void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state);
 
+/**
+  Copying. The key length (or absence thereof) must match between source and destination.
+*/
+Hacl_Hash_Blake2b_Simd256_state_t
+*Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state);
+
 /**
 Write the BLAKE2b digest of message `input` using key `key` into `output`.
@@ -105,6 +161,15 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key(
   uint32_t key_len
 );
 
+void
+Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  Hacl_Hash_Blake2b_blake2_params params,
+  uint8_t *key
+);
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/Hacl_Hash_Blake2s.h b/include/Hacl_Hash_Blake2s.h
index 2c0d7c5b..ac783473 100644
--- a/include/Hacl_Hash_Blake2s.h
+++ b/include/Hacl_Hash_Blake2s.h
@@ -36,12 +36,21 @@ extern "C" {
 #include "krml/internal/target.h"
 #include "Hacl_Streaming_Types.h"
+#include "Hacl_Hash_Blake2b.h"
 
-typedef struct Hacl_Hash_Blake2s_block_state_t_s
+typedef struct K____uint32_t___uint32_t__s
 {
   uint32_t *fst;
   uint32_t *snd;
 }
+K____uint32_t___uint32_t_;
+
+typedef struct Hacl_Hash_Blake2s_block_state_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  K____uint32_t___uint32_t_ thd;
+}
 Hacl_Hash_Blake2s_block_state_t;
 
 typedef struct Hacl_Hash_Blake2s_state_t_s
@@ -52,15 +61,51 @@ Hacl_Hash_Blake2s_state_t;
 
+/**
+  State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2s_state_t
+*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k);
+
+/**
+  State allocation function when there is just a custom key. All
+other parameters are set to their respective default values, meaning the output
+length is the maximum allowed output (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk);
+
 /**
   State allocation function when there is no key
 */
 Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void);
 
+/**
+  Re-initialization function. The reinitialization API is tricky --
+you MUST reuse the same original parameters for digest (output) length and key
+length.
+*/
+void
+Hacl_Hash_Blake2s_reset_with_key_and_params(
+  Hacl_Hash_Blake2s_state_t *s,
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+);
+
+/**
+  Re-initialization function when there is a key. Note that the key
+size is not allowed to change, which is why this function does not take a key
+length -- the key has to be the same size as the key that was originally
+passed to `malloc_with_key`.
+*/
+void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k);
+
 /**
   Re-initialization function when there is no key
 */
-void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state);
+void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
@@ -78,11 +123,16 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output)
 */
 void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state);
 
+/**
+  Copying. The key length (or absence thereof) must match between source and destination.
+*/
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state);
+
 /**
 Write the BLAKE2s digest of message `input` using key `key` into `output`.
 
 @param output Pointer to `output_len` bytes of memory where the digest is written to.
 @param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
 @param input Pointer to `input_len` bytes of memory where the input message is read from.
 @param input_len Length of the input message.
 @param key Pointer to `key_len` bytes of memory where the key is read from.
@@ -98,6 +148,15 @@ Hacl_Hash_Blake2s_hash_with_key(
   uint32_t key_len
 );
 
+void
+Hacl_Hash_Blake2s_hash_with_key_and_paramas(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  Hacl_Hash_Blake2b_blake2_params params,
+  uint8_t *key
+);
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/Hacl_Hash_Blake2s_Simd128.h b/include/Hacl_Hash_Blake2s_Simd128.h
index 6484005e..d725ee86 100644
--- a/include/Hacl_Hash_Blake2s_Simd128.h
+++ b/include/Hacl_Hash_Blake2s_Simd128.h
@@ -36,13 +36,22 @@ extern "C" {
 #include "krml/internal/target.h"
 #include "Hacl_Streaming_Types.h"
+#include "Hacl_Hash_Blake2b.h"
 #include "libintvector.h"
 
-typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s
+typedef struct K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128__s
 {
   Lib_IntVector_Intrinsics_vec128 *fst;
   Lib_IntVector_Intrinsics_vec128 *snd;
 }
+K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_;
+
+typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ thd;
+}
 Hacl_Hash_Blake2s_Simd128_block_state_t;
 
 typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s
@@ -53,15 +62,56 @@ Hacl_Hash_Blake2s_Simd128_state_t;
 
+/**
+  State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2s_Simd128_state_t
+*Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+);
+
+/**
+  State allocation function when there is just a custom key. All
+other parameters are set to their respective default values, meaning the output
+length is the maximum allowed output (32 for S, 64 for B).
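+
+  For instance (a sketch, not normative; a 16-byte all-zero key, so `kk` is
+  16 and any later reset must also use a 16-byte key):
+
+    uint8_t key[16U] = { 0U };
+    Hacl_Hash_Blake2s_Simd128_state_t
+    *st = Hacl_Hash_Blake2s_Simd128_malloc_with_key0(key, 16U);
+    /* ... update/digest ... */
+    Hacl_Hash_Blake2s_Simd128_reset_with_key(st, key);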
+*/
+Hacl_Hash_Blake2s_Simd128_state_t
+*Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk);
+
 /**
   State allocation function when there is no key
 */
 Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void);
 
+/**
+  Re-initialization function. The reinitialization API is tricky --
+you MUST reuse the same original parameters for digest (output) length and key
+length.
+*/
+void
+Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params(
+  Hacl_Hash_Blake2s_Simd128_state_t *s,
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+);
+
+/**
+  Re-initialization function when there is a key. Note that the key
+size is not allowed to change, which is why this function does not take a key
+length -- the key has to be the same size as the key that was originally
+passed to `malloc_with_key`.
+*/
+void
+Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k);
+
 /**
   Re-initialization function when there is no key
 */
-void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state);
+void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
@@ -84,11 +134,17 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8
 */
 void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state);
 
+/**
+  Copying. The key length (or absence thereof) must match between source and destination.
+*/
+Hacl_Hash_Blake2s_Simd128_state_t
+*Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state);
+
 /**
 Write the BLAKE2s digest of message `input` using key `key` into `output`.
 
 @param output Pointer to `output_len` bytes of memory where the digest is written to.
 @param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
 @param input Pointer to `input_len` bytes of memory where the input message is read from.
 @param input_len Length of the input message.
 @param key Pointer to `key_len` bytes of memory where the key is read from.
@@ -104,6 +160,15 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key(
   uint32_t key_len
 );
 
+void
+Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  Hacl_Hash_Blake2b_blake2_params params,
+  uint8_t *key
+);
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/Hacl_Hash_SHA3.h b/include/Hacl_Hash_SHA3.h
index e09f8745..8fb78fcd 100644
--- a/include/Hacl_Hash_SHA3.h
+++ b/include/Hacl_Hash_SHA3.h
@@ -77,49 +77,90 @@ uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s);
 
 bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s);
 
+void Hacl_Hash_SHA3_absorb_inner_32(uint32_t rateInBytes, uint8_t *b, uint64_t *s);
+
 void
-Hacl_Hash_SHA3_shake128_hacl(
-  uint32_t inputByteLen,
-  uint8_t *input,
+Hacl_Hash_SHA3_shake128(
+  uint8_t *output,
   uint32_t outputByteLen,
-  uint8_t *output
+  uint8_t *input,
+  uint32_t inputByteLen
 );
 
 void
-Hacl_Hash_SHA3_shake256_hacl(
-  uint32_t inputByteLen,
-  uint8_t *input,
+Hacl_Hash_SHA3_shake256(
+  uint8_t *output,
   uint32_t outputByteLen,
-  uint8_t *output
+  uint8_t *input,
+  uint32_t inputByteLen
 );
 
-void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t input_len);
+void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
+
+void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
+
+void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
+
+void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
 
-void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t input_len);
+/**
+Allocate a state buffer of 200 bytes
+*/
+uint64_t *Hacl_Hash_SHA3_state_malloc(void);
 
-void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t input_len);
+/**
+Free state buffer
+*/
+void Hacl_Hash_SHA3_state_free(uint64_t *s);
 
-void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t input_len);
+/**
+Absorb a number of input blocks and write the output state
 
-void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
+  This function is intended to receive a hash state and input buffer.
+  It processes an input that is a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block are ignored.
+
+  The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25]
+  The argument `input` (IN) points to `inputByteLen` bytes of valid memory,
+  i.e., uint8_t[inputByteLen]
+*/
 void
-Hacl_Hash_SHA3_squeeze0(
-  uint64_t *s,
-  uint32_t rateInBytes,
-  uint32_t outputByteLen,
-  uint8_t *output
-);
+Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
+
+/**
+Absorb a final partial block of input and write the output state
+
+  This function is intended to receive a hash state and input buffer.
+  It processes the sequence of bytes at the end of the input buffer that is
+  shorter than 168 bytes (the SHAKE128 block size);
+  any bytes of full blocks at the start of the input buffer are ignored.
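+
+  Taken together with `shake128_absorb_nblocks` and `shake128_squeeze_nblocks`,
+  a full hashing pass may be sketched as follows (illustrative only;
+  `input`/`inputByteLen` are caller-provided, and 336 is two SHAKE128 blocks
+  of output):
+
+    uint8_t output[336U];
+    uint64_t *st = Hacl_Hash_SHA3_state_malloc();
+    Hacl_Hash_SHA3_shake128_absorb_nblocks(st, input, inputByteLen);
+    Hacl_Hash_SHA3_shake128_absorb_final(st, input, inputByteLen);
+    Hacl_Hash_SHA3_shake128_squeeze_nblocks(st, output, 336U);
+    Hacl_Hash_SHA3_state_free(st);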
+
+  The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25]
+  The argument `input` (IN) points to `inputByteLen` bytes of valid memory,
+  i.e., uint8_t[inputByteLen]
+
+  Note: the full size of the input buffer must be passed as `inputByteLen`,
+  including the full-block bytes at the start of the input buffer that are ignored
+*/
+void
+Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
+
+/**
+Squeeze a hash state to the output buffer
+
+  This function is intended to receive a hash state and output buffer.
+  It produces an output that is a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block are ignored.
+
+  The argument `state` (IN) points to the hash state, i.e., uint64_t[25]
+  The argument `output` (OUT) points to `outputByteLen` bytes of valid memory,
+  i.e., uint8_t[outputByteLen]
+*/
 void
-Hacl_Hash_SHA3_keccak(
-  uint32_t rate,
-  uint32_t capacity,
-  uint32_t inputByteLen,
-  uint8_t *input,
-  uint8_t delimitedSuffix,
-  uint32_t outputByteLen,
-  uint8_t *output
+Hacl_Hash_SHA3_shake128_squeeze_nblocks(
+  uint64_t *state,
+  uint8_t *output,
+  uint32_t outputByteLen
 );
 
 #if defined(__cplusplus)
diff --git a/include/Hacl_Hash_SHA3_Simd256.h b/include/Hacl_Hash_SHA3_Simd256.h
new file mode 100644
index 00000000..fc2b03b7
--- /dev/null
+++ b/include/Hacl_Hash_SHA3_Simd256.h
@@ -0,0 +1,233 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_SHA3_Simd256_H
+#define __Hacl_Hash_SHA3_Simd256_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "libintvector.h"
+
+typedef struct K____uint8_t___uint8_t__s
+{
+  uint8_t *fst;
+  uint8_t *snd;
+}
+K____uint8_t___uint8_t_;
+
+typedef struct K____uint8_t__K____uint8_t___uint8_t__s
+{
+  uint8_t *fst;
+  K____uint8_t___uint8_t_ snd;
+}
+K____uint8_t__K____uint8_t___uint8_t_;
+
+typedef struct K____uint8_t___uint8_t____K____uint8_t___uint8_t__s
+{
+  uint8_t *fst;
+  K____uint8_t__K____uint8_t___uint8_t_ snd;
+}
+K____uint8_t___uint8_t____K____uint8_t___uint8_t_;
+
+void
+Hacl_Hash_SHA3_Simd256_absorb_inner_256(
+  uint32_t rateInBytes,
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b,
+  Lib_IntVector_Intrinsics_vec256 *s
+);
+
+void
+Hacl_Hash_SHA3_Simd256_shake128(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_shake256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_224(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_384(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_512(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+/**
+Allocate quadruple state buffer (200 bytes for each state)
+*/
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void);
+
+/**
+Free quadruple state buffer
+*/
+void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s);
+
+/**
+Absorb a number of blocks of 4 input buffers and write the output states
+
+  This function is intended to receive a quadruple hash state and 4 input buffers.
+  It processes inputs that are a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block for each buffer are ignored.
+
+  The argument `state` (IN/OUT) points to the quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[inputByteLen]
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+/**
+Absorb the final partial blocks of 4 input buffers and write the output states
+
+  This function is intended to receive a quadruple hash state and 4 input buffers.
+  It processes the sequence of bytes at the end of each input buffer that is
+  shorter than 168 bytes (the SHAKE128 block size);
+  any bytes of full blocks at the start of the input buffers are ignored.
+
+  The argument `state` (IN/OUT) points to the quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[inputByteLen]
+
+  Note: the full size of the input buffers must be passed as `inputByteLen`,
+  including the full-block bytes at the start of each input buffer that are ignored
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+/**
+Squeeze a quadruple hash state to 4 output buffers
+
+  This function is intended to receive a quadruple hash state and 4 output buffers.
+  It produces 4 outputs, each a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block for each buffer are ignored.
+
+  The argument `state` (IN) points to the quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[outputByteLen]
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_SHA3_Simd256_H_DEFINED
+#endif
diff --git a/include/Hacl_SHA2_Types.h b/include/Hacl_SHA2_Types.h
new file mode 100644
index 00000000..d4260d77
--- /dev/null
+++ b/include/Hacl_SHA2_Types.h
@@ -0,0 +1,51 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_SHA2_Types_H
+#define __Hacl_SHA2_Types_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Hash_SHA3_Simd256.h"
+
+typedef K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_2p;
+
+typedef K____uint8_t__K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_3p;
+
+typedef K____uint8_t___uint8_t____K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_4p;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_SHA2_Types_H_DEFINED
+#endif
diff --git a/include/Hacl_SHA2_Vec128.h b/include/Hacl_SHA2_Vec128.h
index 5118cd36..fa6aa99b 100644
--- a/include/Hacl_SHA2_Vec128.h
+++ b/include/Hacl_SHA2_Vec128.h
@@ -35,6 +35,8 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
+#include "Hacl_Hash_SHA3_Simd256.h"
+
 void
 Hacl_SHA2_Vec128_sha224_4(
   uint8_t *dst0,
diff --git a/include/Hacl_SHA2_Vec256.h b/include/Hacl_SHA2_Vec256.h
index e41e9fd4..734c6ddd 100644
--- a/include/Hacl_SHA2_Vec256.h
+++ b/include/Hacl_SHA2_Vec256.h
@@ -36,6 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Krmllib.h"
+#include "Hacl_Hash_SHA3_Simd256.h"
 
 void
 Hacl_SHA2_Vec256_sha224_8(
diff --git a/include/internal/Hacl_Frodo_KEM.h b/include/internal/Hacl_Frodo_KEM.h
index a4e2f62a..34b1816a 100644
--- a/include/internal/Hacl_Frodo_KEM.h
+++ b/include/internal/Hacl_Frodo_KEM.h
@@ -55,10 +55,10 @@ Hacl_Keccak_shake128_4x(
   uint8_t *output3
 )
 {
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input0, output_len, output0);
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input1, output_len, output1);
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input2, output_len, output2);
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input3, output_len, output3);
+  Hacl_Hash_SHA3_shake128(output0, output_len, input0, input_len);
+  Hacl_Hash_SHA3_shake128(output1, output_len, input1, input_len);
+  Hacl_Hash_SHA3_shake128(output2, output_len, input2, input_len);
+  Hacl_Hash_SHA3_shake128(output3, output_len, input3, input_len);
 }
 
 static inline void
diff --git a/include/internal/Hacl_Hash_Blake2b.h b/include/internal/Hacl_Hash_Blake2b.h
index 21689d60..6928d205 100644
--- a/include/internal/Hacl_Hash_Blake2b.h
+++ b/include/internal/Hacl_Hash_Blake2b.h
@@ -38,6 +38,13 @@ extern "C" {
 #include "internal/Hacl_Impl_Blake2_Constants.h"
 #include "../Hacl_Hash_Blake2b.h"
 
+typedef struct Hacl_Hash_Blake2b_index_s
+{
+  uint8_t key_length;
+  uint8_t digest_length;
+}
+Hacl_Hash_Blake2b_index;
+
 void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn);
 
 void
@@ -62,6 +69,13 @@ Hacl_Hash_Blake2b_update_last(
 
 void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash);
 
+typedef struct K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t__s
+{
+  Hacl_Hash_Blake2b_blake2_params *fst;
+  uint8_t *snd;
+}
+K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_;
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/internal/Hacl_Hash_Blake2b_Simd256.h b/include/internal/Hacl_Hash_Blake2b_Simd256.h
index 4cc07869..4dd986b2 100644
--- a/include/internal/Hacl_Hash_Blake2b_Simd256.h
+++ b/include/internal/Hacl_Hash_Blake2b_Simd256.h
@@ -36,6 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "../Hacl_Hash_Blake2b_Simd256.h"
 #include "libintvector.h"
 
diff --git a/include/internal/Hacl_Hash_Blake2s.h b/include/internal/Hacl_Hash_Blake2s.h
index f814aa95..eccd92de 100644
--- a/include/internal/Hacl_Hash_Blake2s.h
+++ b/include/internal/Hacl_Hash_Blake2s.h
@@ -36,6 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "../Hacl_Hash_Blake2s.h"
 
 void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn);
diff --git a/include/internal/Hacl_Hash_Blake2s_Simd128.h b/include/internal/Hacl_Hash_Blake2s_Simd128.h
index 0589aec5..2c422949 100644
--- a/include/internal/Hacl_Hash_Blake2s_Simd128.h
+++ b/include/internal/Hacl_Hash_Blake2s_Simd128.h
@@ -36,6 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "../Hacl_Hash_Blake2s_Simd128.h"
 #include "libintvector.h"
 
diff --git a/include/internal/Hacl_Hash_SHA3.h b/include/internal/Hacl_Hash_SHA3.h
index 1c8129fb..a82af4bd 100644
--- a/include/internal/Hacl_Hash_SHA3.h
+++ b/include/internal/Hacl_Hash_SHA3.h
@@ -37,6 +37,12 @@ extern "C" {
 
 #include "../Hacl_Hash_SHA3.h"
 
+extern const uint32_t Hacl_Hash_SHA3_keccak_rotc[24U];
+
+extern const uint32_t Hacl_Hash_SHA3_keccak_piln[24U];
+
+extern const uint64_t Hacl_Hash_SHA3_keccak_rndc[24U];
+
 void
 Hacl_Hash_SHA3_update_multi_sha3(
   Spec_Hash_Definitions_hash_alg a,
@@ -53,10 +59,6 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint32_t input_len
 );
 
-void Hacl_Hash_SHA3_state_permute(uint64_t *s);
-
-void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
-
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/internal/Hacl_Impl_Blake2_Constants.h b/include/internal/Hacl_Impl_Blake2_Constants.h
index aedc2486..fb3a045c 100644
--- a/include/internal/Hacl_Impl_Blake2_Constants.h
+++ b/include/internal/Hacl_Impl_Blake2_Constants.h
@@ -37,7 +37,7 @@ extern "C" {
 static const
 uint32_t
-Hacl_Hash_Blake2s_sigmaTable[160U] =
+Hacl_Hash_Blake2b_sigmaTable[160U] =
 {
   0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U, 14U, 15U, 14U, 10U, 4U, 8U, 9U, 15U,
   13U, 6U, 1U, 12U, 0U, 2U, 11U, 7U, 5U, 3U, 11U, 8U, 12U, 0U, 5U, 2U, 15U, 13U, 10U, 14U, 3U, 6U,
@@ -51,7 +51,7 @@ Hacl_Hash_Blake2s_sigmaTable[160U] =
 
 static const
 uint32_t
-Hacl_Hash_Blake2s_ivTable_S[8U] =
+Hacl_Hash_Blake2b_ivTable_S[8U] =
 {
   0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x510E527FU, 0x9B05688CU, 0x1F83D9ABU,
   0x5BE0CD19U
@@ -59,7 +59,7 @@ Hacl_Hash_Blake2s_ivTable_S[8U] =
 
 static const
 uint64_t
-Hacl_Hash_Blake2s_ivTable_B[8U] =
+Hacl_Hash_Blake2b_ivTable_B[8U] =
 {
   0x6A09E667F3BCC908ULL, 0xBB67AE8584CAA73BULL, 0x3C6EF372FE94F82BULL, 0xA54FF53A5F1D36F1ULL,
   0x510E527FADE682D1ULL, 0x9B05688C2B3E6C1FULL, 0x1F83D9ABFB41BD6BULL, 0x5BE0CD19137E2179ULL
diff --git a/include/internal/Hacl_SHA2_Types.h b/include/internal/Hacl_SHA2_Types.h
index 5a1eb668..3f07c80f 100644
--- a/include/internal/Hacl_SHA2_Types.h
+++ b/include/internal/Hacl_SHA2_Types.h
@@ -35,31 +35,12 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-typedef struct Hacl_Hash_SHA2_uint8_2p_s
-{
-  uint8_t *fst;
-  uint8_t *snd;
-}
-Hacl_Hash_SHA2_uint8_2p;
-
-typedef struct Hacl_Hash_SHA2_uint8_3p_s
-{
-  uint8_t *fst;
-  Hacl_Hash_SHA2_uint8_2p snd;
-}
-Hacl_Hash_SHA2_uint8_3p;
-
-typedef struct Hacl_Hash_SHA2_uint8_4p_s
-{
-  uint8_t *fst;
-  Hacl_Hash_SHA2_uint8_3p snd;
-}
-Hacl_Hash_SHA2_uint8_4p;
+#include "../Hacl_SHA2_Types.h"
 
 typedef struct Hacl_Hash_SHA2_uint8_5p_s
 {
   uint8_t *fst;
-  Hacl_Hash_SHA2_uint8_4p snd;
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd;
 }
 Hacl_Hash_SHA2_uint8_5p;
@@ -86,8 +67,8 @@ Hacl_Hash_SHA2_uint8_8p;
 
 typedef struct Hacl_Hash_SHA2_uint8_2x4p_s
 {
-  Hacl_Hash_SHA2_uint8_4p fst;
-  Hacl_Hash_SHA2_uint8_4p snd;
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_ fst;
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd;
 }
 Hacl_Hash_SHA2_uint8_2x4p;
 
diff --git a/include/lib_memzero0.h b/include/lib_memzero0.h
index 506dd50f..fea3e41c 100644
--- a/include/lib_memzero0.h
+++ b/include/lib_memzero0.h
@@ -2,4 +2,4 @@
 
 void Lib_Memzero0_memzero0(void *dst, uint64_t len);
 
-#define Lib_Memzero0_memzero(dst, len, t) Lib_Memzero0_memzero0(dst, len * sizeof(t))
+#define Lib_Memzero0_memzero(dst, len, t, _ret_t) Lib_Memzero0_memzero0(dst, len * sizeof(t))
diff --git a/include/msvc/Hacl_Ed25519.h b/include/msvc/Hacl_Ed25519.h
index b2654704..f0dc31e2 100644
--- a/include/msvc/Hacl_Ed25519.h
+++ b/include/msvc/Hacl_Ed25519.h
@@ -47,16 +47,16 @@ extern "C" {
 /**
 Compute the public key from the private key.
 
-  The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
-  The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
+  @param[out] public_key Points to 32 bytes of valid memory, i.e., `uint8_t[32]`. Must not overlap the memory location of `private_key`.
+  @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.
 */
 void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key);
 
 /**
 Compute the expanded keys for an Ed25519 signature.
 
-  The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96].
-  The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
+  @param[out] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`. Must not overlap the memory location of `private_key`.
+  @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.
 
   If one needs to sign several messages under the same private key, it is more efficient
   to call `expand_keys` only once and `sign_expanded` multiple times, for each message.
@@ -66,11 +66,10 @@ void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key);
 /**
 Create an Ed25519 signature with the (precomputed) expanded keys.
 
-  The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].
-  The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96].
-  The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].
-
-  The argument `expanded_keys` is obtained through `expand_keys`.
+  @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `expanded_keys` nor `msg`.
+  @param[in] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`, containing the expanded keys obtained by invoking `expand_keys`.
+  @param[in] msg_len Length of `msg`.
+  @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.
 
   If one needs to sign several messages under the same private key, it is more efficient
   to call `expand_keys` only once and `sign_expanded` multiple times, for each message.
@@ -86,9 +85,10 @@ Hacl_Ed25519_sign_expanded(
 /**
 Create an Ed25519 signature.
 
-  The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].
-  The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
-  The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].
+  @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `private_key` nor `msg`.
+  @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.
+  @param[in] msg_len Length of `msg`.
+  @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.
 
   The function first calls `expand_keys` and then invokes `sign_expanded`.
@@ -101,11 +101,12 @@ Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, ui
 /**
 Verify an Ed25519 signature.
 
-  The function returns `true` if the signature is valid and `false` otherwise.
+  @param public_key Points to 32 bytes of valid memory containing the public key, i.e., `uint8_t[32]`.
+  @param msg_len Length of `msg`.
+  @param msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.
+  @param signature Points to 64 bytes of valid memory containing the signature, i.e., `uint8_t[64]`.
 
-  The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32].
-  The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].
-  The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].
+  @return Returns `true` if the signature is valid and `false` otherwise.
 */
 bool Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature);
diff --git a/include/msvc/Hacl_Hash_SHA3.h b/include/msvc/Hacl_Hash_SHA3.h
index e09f8745..4b69f35a 100644
--- a/include/msvc/Hacl_Hash_SHA3.h
+++ b/include/msvc/Hacl_Hash_SHA3.h
@@ -78,48 +78,87 @@ uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s);
 
 bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s);
 
 void
-Hacl_Hash_SHA3_shake128_hacl(
-  uint32_t inputByteLen,
-  uint8_t *input,
+Hacl_Hash_SHA3_shake128(
+  uint8_t *output,
   uint32_t outputByteLen,
-  uint8_t *output
+  uint8_t *input,
+  uint32_t inputByteLen
 );
 
 void
-Hacl_Hash_SHA3_shake256_hacl(
-  uint32_t inputByteLen,
-  uint8_t *input,
+Hacl_Hash_SHA3_shake256(
+  uint8_t *output,
   uint32_t outputByteLen,
-  uint8_t *output
+  uint8_t *input,
+  uint32_t inputByteLen
 );
 
-void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t input_len);
+void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
+
+void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
+
+void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
+
+void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen);
 
-void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t input_len);
+/**
+Allocate a state buffer of 200 bytes
+*/
+uint64_t *Hacl_Hash_SHA3_state_malloc(void);
 
-void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t input_len);
+/**
+Free state buffer
+*/
+void Hacl_Hash_SHA3_state_free(uint64_t *s);
 
-void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t input_len);
+/**
+Absorb a number of input blocks and write the output state
 
-void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
+  This function is intended to receive a hash state and input buffer.
+  It processes an input that is a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block are ignored.
+
+  The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25]
+  The argument `input` (IN) points to `inputByteLen` bytes of valid memory,
+  i.e., uint8_t[inputByteLen]
+*/
 void
-Hacl_Hash_SHA3_squeeze0(
-  uint64_t *s,
-  uint32_t rateInBytes,
-  uint32_t outputByteLen,
-  uint8_t *output
-);
+Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
+
+/**
+Absorb a final partial block of input and write the output state
+
+  This function is intended to receive a hash state and input buffer.
+  It processes the sequence of bytes at the end of the input buffer that is
+  shorter than 168 bytes (the SHAKE128 block size);
+  any bytes of full blocks at the start of the input buffer are ignored.
+
+  The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25]
+  The argument `input` (IN) points to `inputByteLen` bytes of valid memory,
+  i.e., uint8_t[inputByteLen]
+
+  Note: the full size of the input buffer must be passed as `inputByteLen`,
+  including the full-block bytes at the start of the input buffer that are ignored
+*/
+void
+Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
+
+/**
+Squeeze a hash state to the output buffer
+
+  This function is intended to receive a hash state and output buffer.
+  It produces an output that is a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block are ignored.
+
+  The argument `state` (IN) points to the hash state, i.e., uint64_t[25]
+  The argument `output` (OUT) points to `outputByteLen` bytes of valid memory,
+  i.e., uint8_t[outputByteLen]
+*/
 void
-Hacl_Hash_SHA3_keccak(
-  uint32_t rate,
-  uint32_t capacity,
-  uint32_t inputByteLen,
-  uint8_t *input,
-  uint8_t delimitedSuffix,
-  uint32_t outputByteLen,
-  uint8_t *output
+Hacl_Hash_SHA3_shake128_squeeze_nblocks(
+  uint64_t *state,
+  uint8_t *output,
+  uint32_t outputByteLen
 );
 
 #if defined(__cplusplus)
diff --git a/include/msvc/Hacl_Hash_SHA3_Simd256.h b/include/msvc/Hacl_Hash_SHA3_Simd256.h
new file mode 100644
index 00000000..f38bf7cb
--- /dev/null
+++ b/include/msvc/Hacl_Hash_SHA3_Simd256.h
@@ -0,0 +1,226 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_SHA3_Simd256_H
+#define __Hacl_Hash_SHA3_Simd256_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "libintvector.h"
+
+typedef struct K____uint8_t___uint8_t__s
+{
+  uint8_t *fst;
+  uint8_t *snd;
+}
+K____uint8_t___uint8_t_;
+
+typedef struct K____uint8_t__K____uint8_t___uint8_t__s
+{
+  uint8_t *fst;
+  K____uint8_t___uint8_t_ snd;
+}
+K____uint8_t__K____uint8_t___uint8_t_;
+
+typedef struct K____uint8_t___uint8_t____K____uint8_t___uint8_t__s
+{
+  uint8_t *fst;
+  K____uint8_t__K____uint8_t___uint8_t_ snd;
+}
+K____uint8_t___uint8_t____K____uint8_t___uint8_t_;
+
+void
+Hacl_Hash_SHA3_Simd256_shake128(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_shake256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_224(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_384(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_512(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+/**
+Allocate quadruple state buffer (200 bytes for each state)
+*/
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void);
+
+/**
+Free quadruple state buffer
+*/
+void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s);
+
+/**
+Absorb a number of blocks of 4 input buffers and write the output states
+
+  This function is intended to receive a quadruple hash state and 4 input buffers.
+  It processes inputs that are a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block for each buffer are ignored.
+
+  The argument `state` (IN/OUT) points to the quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[inputByteLen]
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+/**
+Absorb the final partial blocks of 4 input buffers and write the output states
+
+  This function is intended to receive a quadruple hash state and 4 input buffers.
+  It processes the sequence of bytes at the end of each input buffer that is
+  shorter than 168 bytes (the SHAKE128 block size);
+  any bytes of full blocks at the start of the input buffers are ignored.
+
+  The argument `state` (IN/OUT) points to quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[inputByteLen]
+
+  Note: The full size of the input buffers must be passed as `inputByteLen`,
+  including the number of full-block bytes at the start of each input buffer
+  that are ignored.
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+);
+
+/**
+Squeeze a quadruple hash state to 4 output buffers
+
+  This function is intended to receive a quadruple hash state and 4 output buffers.
+  It produces 4 outputs, each a multiple of 168 bytes (the SHAKE128 block
+  size); any additional bytes of a final partial block for each buffer are
+  ignored.
+
+  The argument `state` (IN) points to quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[outputByteLen]
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_SHA3_Simd256_H_DEFINED
+#endif
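A minimal sketch of driving the quadruple SHAKE128 path declared above, assuming the caller has already confirmed vec256 support (e.g., AVX2) at runtime; the wrapper name and the 32-byte output length are illustrative:

#include <stdint.h>
#include "Hacl_Hash_SHA3_Simd256.h"

/* Illustrative wrapper: hash four equal-length messages in one 4-way pass.
 * All four lanes share a single input length and a single output length. */
static void shake128_x4_demo(
  uint8_t *m0, uint8_t *m1, uint8_t *m2, uint8_t *m3, uint32_t msg_len,
  uint8_t d0[32], uint8_t d1[32], uint8_t d2[32], uint8_t d3[32]
)
{
  Hacl_Hash_SHA3_Simd256_shake128(d0, d1, d2, d3, 32U, m0, m1, m2, m3, msg_len);
}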
diff --git a/include/msvc/Hacl_SHA2_Types.h b/include/msvc/Hacl_SHA2_Types.h
new file mode 100644
index 00000000..d4260d77
--- /dev/null
+++ b/include/msvc/Hacl_SHA2_Types.h
@@ -0,0 +1,51 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_SHA2_Types_H
+#define __Hacl_SHA2_Types_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Hash_SHA3_Simd256.h"
+
+typedef K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_2p;
+
+typedef K____uint8_t__K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_3p;
+
+typedef K____uint8_t___uint8_t____K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_4p;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_SHA2_Types_H_DEFINED
+#endif
diff --git a/include/msvc/Hacl_SHA2_Vec128.h b/include/msvc/Hacl_SHA2_Vec128.h
index 5118cd36..fa6aa99b 100644
--- a/include/msvc/Hacl_SHA2_Vec128.h
+++ b/include/msvc/Hacl_SHA2_Vec128.h
@@ -35,6 +35,8 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
+#include "Hacl_Hash_SHA3_Simd256.h"
+
 void
 Hacl_SHA2_Vec128_sha224_4(
   uint8_t *dst0,
diff --git a/include/msvc/Hacl_SHA2_Vec256.h b/include/msvc/Hacl_SHA2_Vec256.h
index e41e9fd4..734c6ddd 100644
--- a/include/msvc/Hacl_SHA2_Vec256.h
+++ b/include/msvc/Hacl_SHA2_Vec256.h
@@ -36,6 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Krmllib.h"
+#include "Hacl_Hash_SHA3_Simd256.h"
 
 void
 Hacl_SHA2_Vec256_sha224_8(
diff --git a/include/msvc/internal/Hacl_Frodo_KEM.h b/include/msvc/internal/Hacl_Frodo_KEM.h
index 6a1ece49..c03a1b03 100644
--- a/include/msvc/internal/Hacl_Frodo_KEM.h
+++ b/include/msvc/internal/Hacl_Frodo_KEM.h
@@ -55,10 +55,10 @@ Hacl_Keccak_shake128_4x(
   uint8_t *output3
 )
 {
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input0, output_len, output0);
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input1, output_len, output1);
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input2, output_len, output2);
-  Hacl_Hash_SHA3_shake128_hacl(input_len, input3, output_len, output3);
+  Hacl_Hash_SHA3_shake128(output0, output_len, input0, input_len);
+  Hacl_Hash_SHA3_shake128(output1, output_len, input1, input_len);
+  Hacl_Hash_SHA3_shake128(output2, output_len, input2, input_len);
+  Hacl_Hash_SHA3_shake128(output3, output_len, input3, input_len);
 }
 
 static inline void
diff --git a/include/msvc/internal/Hacl_Hash_Blake2b.h b/include/msvc/internal/Hacl_Hash_Blake2b.h
index 21689d60..e2437d97 100644
--- a/include/msvc/internal/Hacl_Hash_Blake2b.h
+++ b/include/msvc/internal/Hacl_Hash_Blake2b.h
@@ -38,6 +38,21 @@ extern "C" {
 #include "internal/Hacl_Impl_Blake2_Constants.h"
 #include "../Hacl_Hash_Blake2b.h"
 
+typedef struct Hacl_Hash_Blake2s_blake2_params_s
+{
+  uint8_t digest_length;
+  uint8_t key_length;
+  uint8_t fanout;
+  uint8_t depth;
+  uint32_t leaf_length;
+  uint64_t node_offset;
+  uint8_t node_depth;
+  uint8_t inner_length;
+  uint8_t *salt;
+  uint8_t *personal;
+}
+Hacl_Hash_Blake2s_blake2_params;
+
 void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn);
 
 void
diff --git a/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h b/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h
index 4cc07869..4dd986b2 100644
--- a/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h
+++ b/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h
@@ -36,6 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "../Hacl_Hash_Blake2b_Simd256.h"
 
 #include "libintvector.h"
diff --git a/include/msvc/internal/Hacl_Hash_Blake2s.h b/include/msvc/internal/Hacl_Hash_Blake2s.h
index f814aa95..eccd92de 100644
--- a/include/msvc/internal/Hacl_Hash_Blake2s.h
+++ 
b/include/msvc/internal/Hacl_Hash_Blake2s.h @@ -36,6 +36,7 @@ extern "C" { #include "krml/internal/target.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "../Hacl_Hash_Blake2s.h" void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn); diff --git a/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h b/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h index 0589aec5..2c422949 100644 --- a/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h +++ b/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h @@ -36,6 +36,7 @@ extern "C" { #include "krml/internal/target.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "../Hacl_Hash_Blake2s_Simd128.h" #include "libintvector.h" diff --git a/include/msvc/internal/Hacl_Hash_SHA3.h b/include/msvc/internal/Hacl_Hash_SHA3.h index 1c8129fb..a82af4bd 100644 --- a/include/msvc/internal/Hacl_Hash_SHA3.h +++ b/include/msvc/internal/Hacl_Hash_SHA3.h @@ -37,6 +37,12 @@ extern "C" { #include "../Hacl_Hash_SHA3.h" +extern const uint32_t Hacl_Hash_SHA3_keccak_rotc[24U]; + +extern const uint32_t Hacl_Hash_SHA3_keccak_piln[24U]; + +extern const uint64_t Hacl_Hash_SHA3_keccak_rndc[24U]; + void Hacl_Hash_SHA3_update_multi_sha3( Spec_Hash_Definitions_hash_alg a, @@ -53,10 +59,6 @@ Hacl_Hash_SHA3_update_last_sha3( uint32_t input_len ); -void Hacl_Hash_SHA3_state_permute(uint64_t *s); - -void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s); - #if defined(__cplusplus) } #endif diff --git a/include/msvc/internal/Hacl_SHA2_Types.h b/include/msvc/internal/Hacl_SHA2_Types.h index 5a1eb668..3f07c80f 100644 --- a/include/msvc/internal/Hacl_SHA2_Types.h +++ b/include/msvc/internal/Hacl_SHA2_Types.h @@ -35,31 +35,12 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" -typedef struct Hacl_Hash_SHA2_uint8_2p_s -{ - uint8_t *fst; - uint8_t *snd; -} -Hacl_Hash_SHA2_uint8_2p; - -typedef struct Hacl_Hash_SHA2_uint8_3p_s -{ - uint8_t *fst; - Hacl_Hash_SHA2_uint8_2p snd; -} -Hacl_Hash_SHA2_uint8_3p; - -typedef struct Hacl_Hash_SHA2_uint8_4p_s -{ - uint8_t *fst; - Hacl_Hash_SHA2_uint8_3p snd; -} -Hacl_Hash_SHA2_uint8_4p; +#include "../Hacl_SHA2_Types.h" typedef struct Hacl_Hash_SHA2_uint8_5p_s { uint8_t *fst; - Hacl_Hash_SHA2_uint8_4p snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd; } Hacl_Hash_SHA2_uint8_5p; @@ -86,8 +67,8 @@ Hacl_Hash_SHA2_uint8_8p; typedef struct Hacl_Hash_SHA2_uint8_2x4p_s { - Hacl_Hash_SHA2_uint8_4p fst; - Hacl_Hash_SHA2_uint8_4p snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd; } Hacl_Hash_SHA2_uint8_2x4p; diff --git a/include/msvc/lib_memzero0.h b/include/msvc/lib_memzero0.h index 506dd50f..fea3e41c 100644 --- a/include/msvc/lib_memzero0.h +++ b/include/msvc/lib_memzero0.h @@ -2,4 +2,4 @@ void Lib_Memzero0_memzero0(void *dst, uint64_t len); -#define Lib_Memzero0_memzero(dst, len, t) Lib_Memzero0_memzero0(dst, len * sizeof(t)) +#define Lib_Memzero0_memzero(dst, len, t, _ret_t) Lib_Memzero0_memzero0(dst, len * sizeof(t)) diff --git a/ocaml/ctypes.depend b/ocaml/ctypes.depend index 065ce5eb..8da61ba0 100644 --- a/ocaml/ctypes.depend +++ b/ocaml/ctypes.depend @@ -1,4 +1,4 @@ -CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx 
lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx 
lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx +CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx 
lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx 
lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx lib/Hacl_Streaming_Types_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmo: lib_gen/Hacl_Streaming_Types_gen.cmx: lib/Hacl_Streaming_Types_bindings.cmx @@ -11,18 +11,18 @@ lib/Hacl_Hash_Blake2b_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Ha lib/Hacl_Hash_Blake2b_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_Blake2b_gen.cmx: lib/Hacl_Hash_Blake2b_bindings.cmx lib_gen/Hacl_Hash_Blake2b_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_Blake2b_bindings.cmx lib_gen/Hacl_Hash_Blake2b_gen.cmx -lib/Hacl_Hash_Blake2s_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx -lib/Hacl_Hash_Blake2s_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo +lib/Hacl_Hash_Blake2s_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx +lib/Hacl_Hash_Blake2s_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib/Hacl_Hash_Blake2b_bindings.cmo lib/Hacl_Hash_Blake2b_stubs.cmo lib_gen/Hacl_Hash_Blake2s_gen.cmx: lib/Hacl_Hash_Blake2s_bindings.cmx -lib_gen/Hacl_Hash_Blake2s_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_Blake2s_bindings.cmx lib_gen/Hacl_Hash_Blake2s_gen.cmx -lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx: -lib/Hacl_Hash_Blake2b_Simd256_bindings.cmo: +lib_gen/Hacl_Hash_Blake2s_gen.exe: 
lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_c_stubs.o lib/Hacl_Hash_Blake2s_bindings.cmx lib_gen/Hacl_Hash_Blake2s_gen.cmx +lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx +lib/Hacl_Hash_Blake2b_Simd256_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib/Hacl_Hash_Blake2b_bindings.cmo lib/Hacl_Hash_Blake2b_stubs.cmo lib_gen/Hacl_Hash_Blake2b_Simd256_gen.cmx: lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx -lib_gen/Hacl_Hash_Blake2b_Simd256_gen.exe: lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib_gen/Hacl_Hash_Blake2b_Simd256_gen.cmx -lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx: -lib/Hacl_Hash_Blake2s_Simd128_bindings.cmo: +lib_gen/Hacl_Hash_Blake2b_Simd256_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_c_stubs.o lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib_gen/Hacl_Hash_Blake2b_Simd256_gen.cmx +lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx +lib/Hacl_Hash_Blake2s_Simd128_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib/Hacl_Hash_Blake2b_bindings.cmo lib/Hacl_Hash_Blake2b_stubs.cmo lib_gen/Hacl_Hash_Blake2s_Simd128_gen.cmx: lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx -lib_gen/Hacl_Hash_Blake2s_Simd128_gen.exe: lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib_gen/Hacl_Hash_Blake2s_Simd128_gen.cmx +lib_gen/Hacl_Hash_Blake2s_Simd128_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_c_stubs.o lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib_gen/Hacl_Hash_Blake2s_Simd128_gen.cmx lib/Hacl_Hash_Base_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_Base_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_Base_gen.cmx: lib/Hacl_Hash_Base_bindings.cmx @@ -51,14 +51,18 @@ lib/Hacl_Hash_SHA3_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_ lib/Hacl_Hash_SHA3_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_SHA3_gen.cmx: lib/Hacl_Hash_SHA3_bindings.cmx lib_gen/Hacl_Hash_SHA3_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_SHA3_bindings.cmx lib_gen/Hacl_Hash_SHA3_gen.cmx +lib/Hacl_Hash_SHA3_Simd256_bindings.cmx: +lib/Hacl_Hash_SHA3_Simd256_bindings.cmo: +lib_gen/Hacl_Hash_SHA3_Simd256_gen.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx +lib_gen/Hacl_Hash_SHA3_Simd256_gen.exe: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib_gen/Hacl_Hash_SHA3_Simd256_gen.cmx lib/Hacl_Hash_MD5_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_MD5_gen.cmx: lib/Hacl_Hash_MD5_bindings.cmx lib_gen/Hacl_Hash_MD5_gen.exe: 
lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_MD5_bindings.cmx lib_gen/Hacl_Hash_MD5_gen.cmx -lib/Hacl_SHA2_Types_bindings.cmx: -lib/Hacl_SHA2_Types_bindings.cmo: +lib/Hacl_SHA2_Types_bindings.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx +lib/Hacl_SHA2_Types_bindings.cmo: lib/Hacl_Hash_SHA3_Simd256_bindings.cmo lib/Hacl_Hash_SHA3_Simd256_stubs.cmo lib_gen/Hacl_SHA2_Types_gen.cmx: lib/Hacl_SHA2_Types_bindings.cmx -lib_gen/Hacl_SHA2_Types_gen.exe: lib/Hacl_SHA2_Types_bindings.cmx lib_gen/Hacl_SHA2_Types_gen.cmx +lib_gen/Hacl_SHA2_Types_gen.exe: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_c_stubs.o lib/Hacl_SHA2_Types_bindings.cmx lib_gen/Hacl_SHA2_Types_gen.cmx lib/EverCrypt_Error_bindings.cmx: lib/EverCrypt_Error_bindings.cmo: lib_gen/EverCrypt_Error_gen.cmx: lib/EverCrypt_Error_bindings.cmx @@ -291,14 +295,14 @@ lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Imp lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx -lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx -lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo -lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx: lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx -lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx lib/EverCrypt_Poly1305_bindings.cmx: lib/EverCrypt_Poly1305_bindings.cmo: lib_gen/EverCrypt_Poly1305_gen.cmx: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.exe: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.cmx +lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx +lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo +lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx: lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx +lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx 
lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA512_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx diff --git a/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml index e7ef20d8..1c132a7a 100644 --- a/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2b_Simd256_bindings.ml @@ -2,6 +2,12 @@ open Ctypes module Bindings(F:Cstubs.FOREIGN) = struct open F + module Hacl_Streaming_Types_applied = + (Hacl_Streaming_Types_bindings.Bindings)(Hacl_Streaming_Types_stubs) + open Hacl_Streaming_Types_applied + module Hacl_Hash_Blake2b_applied = + (Hacl_Hash_Blake2b_bindings.Bindings)(Hacl_Hash_Blake2b_stubs) + open Hacl_Hash_Blake2b_applied let hacl_Hash_Blake2b_Simd256_hash_with_key = foreign "Hacl_Hash_Blake2b_Simd256_hash_with_key" (ocaml_bytes @-> @@ -9,4 +15,11 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) + let hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas = + foreign "Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> + (hacl_Hash_Blake2b_blake2_params @-> + (ocaml_bytes @-> (returning void)))))) end \ No newline at end of file diff --git a/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml index 75c75e90..7ba4fcf6 100644 --- a/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2b_bindings.ml @@ -5,21 +5,67 @@ module Bindings(F:Cstubs.FOREIGN) = module Hacl_Streaming_Types_applied = (Hacl_Streaming_Types_bindings.Bindings)(Hacl_Streaming_Types_stubs) open Hacl_Streaming_Types_applied + type hacl_Hash_Blake2b_blake2_params = + [ `hacl_Hash_Blake2b_blake2_params ] structure + let (hacl_Hash_Blake2b_blake2_params : + [ `hacl_Hash_Blake2b_blake2_params ] structure typ) = + structure "Hacl_Hash_Blake2b_blake2_params_s" + let hacl_Hash_Blake2b_blake2_params_digest_length = + field hacl_Hash_Blake2b_blake2_params "digest_length" uint8_t + let hacl_Hash_Blake2b_blake2_params_key_length = + field hacl_Hash_Blake2b_blake2_params "key_length" uint8_t + let hacl_Hash_Blake2b_blake2_params_fanout = + field hacl_Hash_Blake2b_blake2_params "fanout" uint8_t + let hacl_Hash_Blake2b_blake2_params_depth = + field hacl_Hash_Blake2b_blake2_params "depth" uint8_t + let hacl_Hash_Blake2b_blake2_params_leaf_length = + field hacl_Hash_Blake2b_blake2_params "leaf_length" uint32_t + let hacl_Hash_Blake2b_blake2_params_node_offset = + field hacl_Hash_Blake2b_blake2_params "node_offset" uint64_t + let hacl_Hash_Blake2b_blake2_params_node_depth = + field hacl_Hash_Blake2b_blake2_params "node_depth" uint8_t + let hacl_Hash_Blake2b_blake2_params_inner_length = + field hacl_Hash_Blake2b_blake2_params "inner_length" uint8_t + let hacl_Hash_Blake2b_blake2_params_salt = + field hacl_Hash_Blake2b_blake2_params "salt" (ptr uint8_t) + let hacl_Hash_Blake2b_blake2_params_personal = + field hacl_Hash_Blake2b_blake2_params "personal" (ptr uint8_t) + let _ = seal hacl_Hash_Blake2b_blake2_params + type hacl_Hash_Blake2b_index = [ `hacl_Hash_Blake2b_index ] structure + let (hacl_Hash_Blake2b_index : + [ `hacl_Hash_Blake2b_index ] 
structure typ) = + structure "Hacl_Hash_Blake2b_index_s" + let hacl_Hash_Blake2b_index_key_length = + field hacl_Hash_Blake2b_index "key_length" uint8_t + let hacl_Hash_Blake2b_index_digest_length = + field hacl_Hash_Blake2b_index "digest_length" uint8_t + let _ = seal hacl_Hash_Blake2b_index let hacl_Hash_Blake2b_init = foreign "Hacl_Hash_Blake2b_init" ((ptr uint64_t) @-> (uint32_t @-> (uint32_t @-> (returning void)))) let hacl_Hash_Blake2b_finish = foreign "Hacl_Hash_Blake2b_finish" (uint32_t @-> (ocaml_bytes @-> ((ptr uint64_t) @-> (returning void)))) + type k____uint64_t___uint64_t_ = [ `k____uint64_t___uint64_t_ ] structure + let (k____uint64_t___uint64_t_ : + [ `k____uint64_t___uint64_t_ ] structure typ) = + structure "K____uint64_t___uint64_t__s" + let k____uint64_t___uint64_t__fst = + field k____uint64_t___uint64_t_ "fst" (ptr uint64_t) + let k____uint64_t___uint64_t__snd = + field k____uint64_t___uint64_t_ "snd" (ptr uint64_t) + let _ = seal k____uint64_t___uint64_t_ type hacl_Hash_Blake2b_block_state_t = [ `hacl_Hash_Blake2b_block_state_t ] structure let (hacl_Hash_Blake2b_block_state_t : [ `hacl_Hash_Blake2b_block_state_t ] structure typ) = structure "Hacl_Hash_Blake2b_block_state_t_s" let hacl_Hash_Blake2b_block_state_t_fst = - field hacl_Hash_Blake2b_block_state_t "fst" (ptr uint64_t) + field hacl_Hash_Blake2b_block_state_t "fst" uint8_t let hacl_Hash_Blake2b_block_state_t_snd = - field hacl_Hash_Blake2b_block_state_t "snd" (ptr uint64_t) + field hacl_Hash_Blake2b_block_state_t "snd" uint8_t + let hacl_Hash_Blake2b_block_state_t_thd = + field hacl_Hash_Blake2b_block_state_t "thd" k____uint64_t___uint64_t_ let _ = seal hacl_Hash_Blake2b_block_state_t type hacl_Hash_Blake2b_state_t = [ `hacl_Hash_Blake2b_state_t ] structure let (hacl_Hash_Blake2b_state_t : @@ -33,9 +79,26 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2b_state_t_total_len = field hacl_Hash_Blake2b_state_t "total_len" uint64_t let _ = seal hacl_Hash_Blake2b_state_t + let hacl_Hash_Blake2b_malloc_with_params_and_key = + foreign "Hacl_Hash_Blake2b_malloc_with_params_and_key" + ((ptr hacl_Hash_Blake2b_blake2_params) @-> + (ocaml_bytes @-> (returning (ptr hacl_Hash_Blake2b_state_t)))) + let hacl_Hash_Blake2b_malloc_with_key = + foreign "Hacl_Hash_Blake2b_malloc_with_key" + (ocaml_bytes @-> + (uint8_t @-> (returning (ptr hacl_Hash_Blake2b_state_t)))) let hacl_Hash_Blake2b_malloc = foreign "Hacl_Hash_Blake2b_malloc" (void @-> (returning (ptr hacl_Hash_Blake2b_state_t))) + let hacl_Hash_Blake2b_reset_with_key_and_params = + foreign "Hacl_Hash_Blake2b_reset_with_key_and_params" + ((ptr hacl_Hash_Blake2b_state_t) @-> + ((ptr hacl_Hash_Blake2b_blake2_params) @-> + (ocaml_bytes @-> (returning void)))) + let hacl_Hash_Blake2b_reset_with_key = + foreign "Hacl_Hash_Blake2b_reset_with_key" + ((ptr hacl_Hash_Blake2b_state_t) @-> + (ocaml_bytes @-> (returning void))) let hacl_Hash_Blake2b_reset = foreign "Hacl_Hash_Blake2b_reset" ((ptr hacl_Hash_Blake2b_state_t) @-> (returning void)) @@ -51,6 +114,10 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2b_free = foreign "Hacl_Hash_Blake2b_free" ((ptr hacl_Hash_Blake2b_state_t) @-> (returning void)) + let hacl_Hash_Blake2b_copy = + foreign "Hacl_Hash_Blake2b_copy" + ((ptr hacl_Hash_Blake2b_state_t) @-> + (returning (ptr hacl_Hash_Blake2b_state_t))) let hacl_Hash_Blake2b_hash_with_key = foreign "Hacl_Hash_Blake2b_hash_with_key" (ocaml_bytes @-> @@ -58,4 +125,11 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> 
(returning void))))))) + let hacl_Hash_Blake2b_hash_with_key_and_paramas = + foreign "Hacl_Hash_Blake2b_hash_with_key_and_paramas" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> + (hacl_Hash_Blake2b_blake2_params @-> + (ocaml_bytes @-> (returning void)))))) end \ No newline at end of file diff --git a/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml index db4ff123..6533ddbc 100644 --- a/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2s_Simd128_bindings.ml @@ -2,6 +2,12 @@ open Ctypes module Bindings(F:Cstubs.FOREIGN) = struct open F + module Hacl_Streaming_Types_applied = + (Hacl_Streaming_Types_bindings.Bindings)(Hacl_Streaming_Types_stubs) + open Hacl_Streaming_Types_applied + module Hacl_Hash_Blake2b_applied = + (Hacl_Hash_Blake2b_bindings.Bindings)(Hacl_Hash_Blake2b_stubs) + open Hacl_Hash_Blake2b_applied let hacl_Hash_Blake2s_Simd128_hash_with_key = foreign "Hacl_Hash_Blake2s_Simd128_hash_with_key" (ocaml_bytes @-> @@ -9,4 +15,11 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) + let hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas = + foreign "Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> + (hacl_Hash_Blake2b_blake2_params @-> + (ocaml_bytes @-> (returning void)))))) end \ No newline at end of file diff --git a/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml b/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml index 7f057689..f6c93e89 100644 --- a/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml +++ b/ocaml/lib/Hacl_Hash_Blake2s_bindings.ml @@ -5,6 +5,9 @@ module Bindings(F:Cstubs.FOREIGN) = module Hacl_Streaming_Types_applied = (Hacl_Streaming_Types_bindings.Bindings)(Hacl_Streaming_Types_stubs) open Hacl_Streaming_Types_applied + module Hacl_Hash_Blake2b_applied = + (Hacl_Hash_Blake2b_bindings.Bindings)(Hacl_Hash_Blake2b_stubs) + open Hacl_Hash_Blake2b_applied let hacl_Hash_Blake2s_init = foreign "Hacl_Hash_Blake2s_init" ((ptr uint32_t) @-> (uint32_t @-> (uint32_t @-> (returning void)))) @@ -25,15 +28,26 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2s_finish = foreign "Hacl_Hash_Blake2s_finish" (uint32_t @-> (ocaml_bytes @-> ((ptr uint32_t) @-> (returning void)))) + type k____uint32_t___uint32_t_ = [ `k____uint32_t___uint32_t_ ] structure + let (k____uint32_t___uint32_t_ : + [ `k____uint32_t___uint32_t_ ] structure typ) = + structure "K____uint32_t___uint32_t__s" + let k____uint32_t___uint32_t__fst = + field k____uint32_t___uint32_t_ "fst" (ptr uint32_t) + let k____uint32_t___uint32_t__snd = + field k____uint32_t___uint32_t_ "snd" (ptr uint32_t) + let _ = seal k____uint32_t___uint32_t_ type hacl_Hash_Blake2s_block_state_t = [ `hacl_Hash_Blake2s_block_state_t ] structure let (hacl_Hash_Blake2s_block_state_t : [ `hacl_Hash_Blake2s_block_state_t ] structure typ) = structure "Hacl_Hash_Blake2s_block_state_t_s" let hacl_Hash_Blake2s_block_state_t_fst = - field hacl_Hash_Blake2s_block_state_t "fst" (ptr uint32_t) + field hacl_Hash_Blake2s_block_state_t "fst" uint8_t let hacl_Hash_Blake2s_block_state_t_snd = - field hacl_Hash_Blake2s_block_state_t "snd" (ptr uint32_t) + field hacl_Hash_Blake2s_block_state_t "snd" uint8_t + let hacl_Hash_Blake2s_block_state_t_thd = + field hacl_Hash_Blake2s_block_state_t "thd" k____uint32_t___uint32_t_ let _ = seal hacl_Hash_Blake2s_block_state_t type hacl_Hash_Blake2s_state_t = [ `hacl_Hash_Blake2s_state_t ] structure let 
(hacl_Hash_Blake2s_state_t : @@ -47,9 +61,26 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2s_state_t_total_len = field hacl_Hash_Blake2s_state_t "total_len" uint64_t let _ = seal hacl_Hash_Blake2s_state_t + let hacl_Hash_Blake2s_malloc_with_params_and_key = + foreign "Hacl_Hash_Blake2s_malloc_with_params_and_key" + ((ptr hacl_Hash_Blake2b_blake2_params) @-> + (ocaml_bytes @-> (returning (ptr hacl_Hash_Blake2s_state_t)))) + let hacl_Hash_Blake2s_malloc_with_key = + foreign "Hacl_Hash_Blake2s_malloc_with_key" + (ocaml_bytes @-> + (uint8_t @-> (returning (ptr hacl_Hash_Blake2s_state_t)))) let hacl_Hash_Blake2s_malloc = foreign "Hacl_Hash_Blake2s_malloc" (void @-> (returning (ptr hacl_Hash_Blake2s_state_t))) + let hacl_Hash_Blake2s_reset_with_key_and_params = + foreign "Hacl_Hash_Blake2s_reset_with_key_and_params" + ((ptr hacl_Hash_Blake2s_state_t) @-> + ((ptr hacl_Hash_Blake2b_blake2_params) @-> + (ocaml_bytes @-> (returning void)))) + let hacl_Hash_Blake2s_reset_with_key = + foreign "Hacl_Hash_Blake2s_reset_with_key" + ((ptr hacl_Hash_Blake2s_state_t) @-> + (ocaml_bytes @-> (returning void))) let hacl_Hash_Blake2s_reset = foreign "Hacl_Hash_Blake2s_reset" ((ptr hacl_Hash_Blake2s_state_t) @-> (returning void)) @@ -65,6 +96,10 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_Blake2s_free = foreign "Hacl_Hash_Blake2s_free" ((ptr hacl_Hash_Blake2s_state_t) @-> (returning void)) + let hacl_Hash_Blake2s_copy = + foreign "Hacl_Hash_Blake2s_copy" + ((ptr hacl_Hash_Blake2s_state_t) @-> + (returning (ptr hacl_Hash_Blake2s_state_t))) let hacl_Hash_Blake2s_hash_with_key = foreign "Hacl_Hash_Blake2s_hash_with_key" (ocaml_bytes @-> @@ -72,4 +107,11 @@ module Bindings(F:Cstubs.FOREIGN) = (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))))) + let hacl_Hash_Blake2s_hash_with_key_and_paramas = + foreign "Hacl_Hash_Blake2s_hash_with_key_and_paramas" + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> + (hacl_Hash_Blake2b_blake2_params @-> + (ocaml_bytes @-> (returning void)))))) end \ No newline at end of file diff --git a/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml b/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml new file mode 100644 index 00000000..c5e160c6 --- /dev/null +++ b/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml @@ -0,0 +1,101 @@ +open Ctypes +module Bindings(F:Cstubs.FOREIGN) = + struct + open F + type k____uint8_t___uint8_t_ = [ `k____uint8_t___uint8_t_ ] structure + let (k____uint8_t___uint8_t_ : + [ `k____uint8_t___uint8_t_ ] structure typ) = + structure "K____uint8_t___uint8_t__s" + let k____uint8_t___uint8_t__fst = + field k____uint8_t___uint8_t_ "fst" (ptr uint8_t) + let k____uint8_t___uint8_t__snd = + field k____uint8_t___uint8_t_ "snd" (ptr uint8_t) + let _ = seal k____uint8_t___uint8_t_ + type k____uint8_t__K____uint8_t___uint8_t_ = + [ `k____uint8_t__K____uint8_t___uint8_t_ ] structure + let (k____uint8_t__K____uint8_t___uint8_t_ : + [ `k____uint8_t__K____uint8_t___uint8_t_ ] structure typ) = + structure "K____uint8_t__K____uint8_t___uint8_t__s" + let k____uint8_t__K____uint8_t___uint8_t__fst = + field k____uint8_t__K____uint8_t___uint8_t_ "fst" (ptr uint8_t) + let k____uint8_t__K____uint8_t___uint8_t__snd = + field k____uint8_t__K____uint8_t___uint8_t_ "snd" + k____uint8_t___uint8_t_ + let _ = seal k____uint8_t__K____uint8_t___uint8_t_ + type k____uint8_t___uint8_t____K____uint8_t___uint8_t_ = + [ `k____uint8_t___uint8_t____K____uint8_t___uint8_t_ ] structure + let (k____uint8_t___uint8_t____K____uint8_t___uint8_t_ : + [ 
`k____uint8_t___uint8_t____K____uint8_t___uint8_t_ ] structure typ) = + structure "K____uint8_t___uint8_t____K____uint8_t___uint8_t__s" + let k____uint8_t___uint8_t____K____uint8_t___uint8_t__fst = + field k____uint8_t___uint8_t____K____uint8_t___uint8_t_ "fst" + (ptr uint8_t) + let k____uint8_t___uint8_t____K____uint8_t___uint8_t__snd = + field k____uint8_t___uint8_t____K____uint8_t___uint8_t_ "snd" + k____uint8_t__K____uint8_t___uint8_t_ + let _ = seal k____uint8_t___uint8_t____K____uint8_t___uint8_t_ + let hacl_Hash_SHA3_Simd256_shake128 = + foreign "Hacl_Hash_SHA3_Simd256_shake128" + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (returning void))))))))))) + let hacl_Hash_SHA3_Simd256_shake256 = + foreign "Hacl_Hash_SHA3_Simd256_shake256" + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (uint32_t @-> (returning void))))))))))) + let hacl_Hash_SHA3_Simd256_sha3_224 = + foreign "Hacl_Hash_SHA3_Simd256_sha3_224" + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> (uint32_t @-> (returning void)))))))))) + let hacl_Hash_SHA3_Simd256_sha3_256 = + foreign "Hacl_Hash_SHA3_Simd256_sha3_256" + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> (uint32_t @-> (returning void)))))))))) + let hacl_Hash_SHA3_Simd256_sha3_384 = + foreign "Hacl_Hash_SHA3_Simd256_sha3_384" + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> (uint32_t @-> (returning void)))))))))) + let hacl_Hash_SHA3_Simd256_sha3_512 = + foreign "Hacl_Hash_SHA3_Simd256_sha3_512" + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> + (ocaml_bytes @-> (uint32_t @-> (returning void)))))))))) + end \ No newline at end of file diff --git a/ocaml/lib/Hacl_Hash_SHA3_bindings.ml b/ocaml/lib/Hacl_Hash_SHA3_bindings.ml index 45718cbd..dd900903 100644 --- a/ocaml/lib/Hacl_Hash_SHA3_bindings.ml +++ b/ocaml/lib/Hacl_Hash_SHA3_bindings.ml @@ -75,14 +75,17 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_SHA3_is_shake = foreign "Hacl_Hash_SHA3_is_shake" ((ptr hacl_Hash_SHA3_state_t) @-> (returning bool)) - let hacl_Hash_SHA3_shake128_hacl = - foreign "Hacl_Hash_SHA3_shake128_hacl" - (uint32_t @-> - (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (returning void))))) - let hacl_Hash_SHA3_shake256_hacl = - foreign "Hacl_Hash_SHA3_shake256_hacl" - (uint32_t @-> - (ocaml_bytes @-> (uint32_t @-> (ocaml_bytes @-> (returning void))))) + let hacl_Hash_SHA3_absorb_inner_32 = + foreign "Hacl_Hash_SHA3_absorb_inner_32" + (uint32_t @-> (ocaml_bytes @-> ((ptr uint64_t) @-> (returning void)))) + let hacl_Hash_SHA3_shake128 = + foreign "Hacl_Hash_SHA3_shake128" + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))) + let hacl_Hash_SHA3_shake256 = + foreign "Hacl_Hash_SHA3_shake256" + (ocaml_bytes @-> + (uint32_t @-> (ocaml_bytes @-> (uint32_t @-> (returning void))))) let hacl_Hash_SHA3_sha3_224 = foreign "Hacl_Hash_SHA3_sha3_224" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))) @@ -95,25 
+98,19 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_SHA3_sha3_512 = foreign "Hacl_Hash_SHA3_sha3_512" (ocaml_bytes @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))) - let hacl_Hash_SHA3_state_permute = - foreign "Hacl_Hash_SHA3_state_permute" + let hacl_Hash_SHA3_state_malloc = + foreign "Hacl_Hash_SHA3_state_malloc" + (void @-> (returning (ptr uint64_t))) + let hacl_Hash_SHA3_state_free = + foreign "Hacl_Hash_SHA3_state_free" ((ptr uint64_t) @-> (returning void)) - let hacl_Hash_SHA3_loadState = - foreign "Hacl_Hash_SHA3_loadState" - (uint32_t @-> (ocaml_bytes @-> ((ptr uint64_t) @-> (returning void)))) - let hacl_Hash_SHA3_absorb_inner = - foreign "Hacl_Hash_SHA3_absorb_inner" - (uint32_t @-> (ocaml_bytes @-> ((ptr uint64_t) @-> (returning void)))) - let hacl_Hash_SHA3_squeeze0 = - foreign "Hacl_Hash_SHA3_squeeze0" - ((ptr uint64_t) @-> - (uint32_t @-> (uint32_t @-> (ocaml_bytes @-> (returning void))))) - let hacl_Hash_SHA3_keccak = - foreign "Hacl_Hash_SHA3_keccak" - (uint32_t @-> - (uint32_t @-> - (uint32_t @-> - (ocaml_bytes @-> - (uint8_t @-> - (uint32_t @-> (ocaml_bytes @-> (returning void)))))))) + let hacl_Hash_SHA3_shake128_absorb_nblocks = + foreign "Hacl_Hash_SHA3_shake128_absorb_nblocks" + ((ptr uint64_t) @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))) + let hacl_Hash_SHA3_shake128_absorb_final = + foreign "Hacl_Hash_SHA3_shake128_absorb_final" + ((ptr uint64_t) @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))) + let hacl_Hash_SHA3_shake128_squeeze_nblocks = + foreign "Hacl_Hash_SHA3_shake128_squeeze_nblocks" + ((ptr uint64_t) @-> (ocaml_bytes @-> (uint32_t @-> (returning void)))) end \ No newline at end of file diff --git a/ocaml/lib/Hacl_SHA2_Types_bindings.ml b/ocaml/lib/Hacl_SHA2_Types_bindings.ml index 274f959d..d2b67650 100644 --- a/ocaml/lib/Hacl_SHA2_Types_bindings.ml +++ b/ocaml/lib/Hacl_SHA2_Types_bindings.ml @@ -2,33 +2,20 @@ open Ctypes module Bindings(F:Cstubs.FOREIGN) = struct open F - type hacl_Hash_SHA2_uint8_2p = [ `hacl_Hash_SHA2_uint8_2p ] structure - let (hacl_Hash_SHA2_uint8_2p : - [ `hacl_Hash_SHA2_uint8_2p ] structure typ) = - structure "Hacl_Hash_SHA2_uint8_2p_s" - let hacl_Hash_SHA2_uint8_2p_fst = - field hacl_Hash_SHA2_uint8_2p "fst" (ptr uint8_t) - let hacl_Hash_SHA2_uint8_2p_snd = - field hacl_Hash_SHA2_uint8_2p "snd" (ptr uint8_t) - let _ = seal hacl_Hash_SHA2_uint8_2p - type hacl_Hash_SHA2_uint8_3p = [ `hacl_Hash_SHA2_uint8_3p ] structure - let (hacl_Hash_SHA2_uint8_3p : - [ `hacl_Hash_SHA2_uint8_3p ] structure typ) = - structure "Hacl_Hash_SHA2_uint8_3p_s" - let hacl_Hash_SHA2_uint8_3p_fst = - field hacl_Hash_SHA2_uint8_3p "fst" (ptr uint8_t) - let hacl_Hash_SHA2_uint8_3p_snd = - field hacl_Hash_SHA2_uint8_3p "snd" hacl_Hash_SHA2_uint8_2p - let _ = seal hacl_Hash_SHA2_uint8_3p - type hacl_Hash_SHA2_uint8_4p = [ `hacl_Hash_SHA2_uint8_4p ] structure - let (hacl_Hash_SHA2_uint8_4p : - [ `hacl_Hash_SHA2_uint8_4p ] structure typ) = - structure "Hacl_Hash_SHA2_uint8_4p_s" - let hacl_Hash_SHA2_uint8_4p_fst = - field hacl_Hash_SHA2_uint8_4p "fst" (ptr uint8_t) - let hacl_Hash_SHA2_uint8_4p_snd = - field hacl_Hash_SHA2_uint8_4p "snd" hacl_Hash_SHA2_uint8_3p - let _ = seal hacl_Hash_SHA2_uint8_4p + module Hacl_Hash_SHA3_Simd256_applied = + (Hacl_Hash_SHA3_Simd256_bindings.Bindings)(Hacl_Hash_SHA3_Simd256_stubs) + open Hacl_Hash_SHA3_Simd256_applied + type hacl_Hash_SHA2_uint8_2p = k____uint8_t___uint8_t_ + let hacl_Hash_SHA2_uint8_2p = + typedef k____uint8_t___uint8_t_ "Hacl_Hash_SHA2_uint8_2p" + type hacl_Hash_SHA2_uint8_3p = 
k____uint8_t__K____uint8_t___uint8_t_ + let hacl_Hash_SHA2_uint8_3p = + typedef k____uint8_t__K____uint8_t___uint8_t_ "Hacl_Hash_SHA2_uint8_3p" + type hacl_Hash_SHA2_uint8_4p = + k____uint8_t___uint8_t____K____uint8_t___uint8_t_ + let hacl_Hash_SHA2_uint8_4p = + typedef k____uint8_t___uint8_t____K____uint8_t___uint8_t_ + "Hacl_Hash_SHA2_uint8_4p" type hacl_Hash_SHA2_uint8_5p = [ `hacl_Hash_SHA2_uint8_5p ] structure let (hacl_Hash_SHA2_uint8_5p : [ `hacl_Hash_SHA2_uint8_5p ] structure typ) = @@ -36,7 +23,8 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_SHA2_uint8_5p_fst = field hacl_Hash_SHA2_uint8_5p "fst" (ptr uint8_t) let hacl_Hash_SHA2_uint8_5p_snd = - field hacl_Hash_SHA2_uint8_5p "snd" hacl_Hash_SHA2_uint8_4p + field hacl_Hash_SHA2_uint8_5p "snd" + k____uint8_t___uint8_t____K____uint8_t___uint8_t_ let _ = seal hacl_Hash_SHA2_uint8_5p type hacl_Hash_SHA2_uint8_6p = [ `hacl_Hash_SHA2_uint8_6p ] structure let (hacl_Hash_SHA2_uint8_6p : @@ -70,9 +58,11 @@ module Bindings(F:Cstubs.FOREIGN) = [ `hacl_Hash_SHA2_uint8_2x4p ] structure typ) = structure "Hacl_Hash_SHA2_uint8_2x4p_s" let hacl_Hash_SHA2_uint8_2x4p_fst = - field hacl_Hash_SHA2_uint8_2x4p "fst" hacl_Hash_SHA2_uint8_4p + field hacl_Hash_SHA2_uint8_2x4p "fst" + k____uint8_t___uint8_t____K____uint8_t___uint8_t_ let hacl_Hash_SHA2_uint8_2x4p_snd = - field hacl_Hash_SHA2_uint8_2x4p "snd" hacl_Hash_SHA2_uint8_4p + field hacl_Hash_SHA2_uint8_2x4p "snd" + k____uint8_t___uint8_t____K____uint8_t___uint8_t_ let _ = seal hacl_Hash_SHA2_uint8_2x4p type hacl_Hash_SHA2_uint8_2x8p = [ `hacl_Hash_SHA2_uint8_2x8p ] structure let (hacl_Hash_SHA2_uint8_2x8p : diff --git a/ocaml/lib_gen/Hacl_Hash_SHA3_Simd256_gen.ml b/ocaml/lib_gen/Hacl_Hash_SHA3_Simd256_gen.ml new file mode 100644 index 00000000..db9025c2 --- /dev/null +++ b/ocaml/lib_gen/Hacl_Hash_SHA3_Simd256_gen.ml @@ -0,0 +1,10 @@ +let _ = + (((Format.set_formatter_out_channel + (open_out_bin "lib/Hacl_Hash_SHA3_Simd256_stubs.ml"); + Cstubs.write_ml Format.std_formatter ~prefix:"" + (module Hacl_Hash_SHA3_Simd256_bindings.Bindings)); + Format.set_formatter_out_channel + (open_out_bin "lib/Hacl_Hash_SHA3_Simd256_c_stubs.c")); + Format.printf "#include \"Hacl_Hash_SHA3_Simd256.h\"\n"); + Cstubs.write_c Format.std_formatter ~prefix:"" + (module Hacl_Hash_SHA3_Simd256_bindings.Bindings) \ No newline at end of file diff --git a/ocaml/lib_gen/Hacl_SHA2_Types_gen.ml b/ocaml/lib_gen/Hacl_SHA2_Types_gen.ml index affc9abf..bec1bcb3 100644 --- a/ocaml/lib_gen/Hacl_SHA2_Types_gen.ml +++ b/ocaml/lib_gen/Hacl_SHA2_Types_gen.ml @@ -5,6 +5,7 @@ let _ = (module Hacl_SHA2_Types_bindings.Bindings)); Format.set_formatter_out_channel (open_out_bin "lib/Hacl_SHA2_Types_c_stubs.c")); - Format.printf "#include \"internal/Hacl_SHA2_Types.h\"\n"); + Format.printf + "#include \"Hacl_SHA2_Types.h\"\n#include \"internal/Hacl_SHA2_Types.h\"\n"); Cstubs.write_c Format.std_formatter ~prefix:"" (module Hacl_SHA2_Types_bindings.Bindings) \ No newline at end of file diff --git a/src/EverCrypt_DRBG.c b/src/EverCrypt_DRBG.c index 301fe528..a831a5b5 100644 --- a/src/EverCrypt_DRBG.c +++ b/src/EverCrypt_DRBG.c @@ -1770,8 +1770,8 @@ static void uninstantiate_sha1(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 20U, uint8_t); - Lib_Memzero0_memzero(v, 20U, uint8_t); + Lib_Memzero0_memzero(k, 20U, uint8_t, void *); + Lib_Memzero0_memzero(v, 20U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); @@ -1794,8 +1794,8 @@ static void 
uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 32U, uint8_t); - Lib_Memzero0_memzero(v, 32U, uint8_t); + Lib_Memzero0_memzero(k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(v, 32U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); @@ -1818,8 +1818,8 @@ static void uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 48U, uint8_t); - Lib_Memzero0_memzero(v, 48U, uint8_t); + Lib_Memzero0_memzero(k, 48U, uint8_t, void *); + Lib_Memzero0_memzero(v, 48U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); @@ -1842,8 +1842,8 @@ static void uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 64U, uint8_t); - Lib_Memzero0_memzero(v, 64U, uint8_t); + Lib_Memzero0_memzero(k, 64U, uint8_t, void *); + Lib_Memzero0_memzero(v, 64U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); diff --git a/src/EverCrypt_Hash.c b/src/EverCrypt_Hash.c index 92b3c227..bfafa9be 100644 --- a/src/EverCrypt_Hash.c +++ b/src/EverCrypt_Hash.c @@ -709,25 +709,57 @@ static void finish(EverCrypt_Hash_state_s *s, uint8_t *dst) if (scrut.tag == SHA3_224_s) { uint64_t *p1 = scrut.case_SHA3_224_s; - Hacl_Hash_SHA3_squeeze0(p1, 144U, 28U, dst); + uint32_t remOut = 28U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == SHA3_256_s) { uint64_t *p1 = scrut.case_SHA3_256_s; - Hacl_Hash_SHA3_squeeze0(p1, 136U, 32U, dst); + uint32_t remOut = 32U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == SHA3_384_s) { uint64_t *p1 = scrut.case_SHA3_384_s; - Hacl_Hash_SHA3_squeeze0(p1, 104U, 48U, dst); + uint32_t remOut = 48U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == SHA3_512_s) { uint64_t *p1 = scrut.case_SHA3_512_s; - Hacl_Hash_SHA3_squeeze0(p1, 72U, 64U, dst); + uint32_t remOut = 64U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == Blake2S_s) diff --git a/src/Hacl_Ed25519.c b/src/Hacl_Ed25519.c index 05d96cd0..d1f8edf2 100644 --- a/src/Hacl_Ed25519.c +++ b/src/Hacl_Ed25519.c @@ -1712,8 +1712,8 @@ static inline void secret_expand(uint8_t *expanded, uint8_t *secret) /** Compute the public key from the private key. - The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. + @param[out] public_key Points to 32 bytes of valid memory, i.e., `uint8_t[32]`. Must not overlap the memory location of `private_key`. 
+ @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. */ void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key) { @@ -1726,8 +1726,8 @@ void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key) /** Compute the expanded keys for an Ed25519 signature. - The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. + @param[out] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`. Must not overlap the memory location of `private_key`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. If one needs to sign several messages under the same private key, it is more efficient to call `expand_keys` only once and `sign_expanded` multiple times, for each message. @@ -1744,11 +1744,10 @@ void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key) /** Create an Ed25519 signature with the (precomputed) expanded keys. - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - - The argument `expanded_keys` is obtained through `expand_keys`. + @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `expanded_keys` nor `msg`. + @param[in] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`, containing the expanded keys obtained by invoking `expand_keys`. + @param[in] msg_len Length of `msg`. + @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. If one needs to sign several messages under the same private key, it is more efficient to call `expand_keys` only once and `sign_expanded` multiple times, for each message. @@ -1783,9 +1782,10 @@ Hacl_Ed25519_sign_expanded( /** Create an Ed25519 signature. - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. + @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `private_key` nor `msg`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. + @param[in] msg_len Length of `msg`. + @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. The function first calls `expand_keys` and then invokes `sign_expanded`. @@ -1803,11 +1803,12 @@ Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, ui /** Verify an Ed25519 signature. - The function returns `true` if the signature is valid and `false` otherwise. + @param public_key Points to 32 bytes of valid memory containing the public key, i.e., `uint8_t[32]`. + @param msg_len Length of `msg`. + @param msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. + @param signature Points to 64 bytes of valid memory containing the signature, i.e., `uint8_t[64]`. - The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. 
- The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. + @return Returns `true` if the signature is valid and `false` otherwise. */ bool Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature) diff --git a/src/Hacl_Frodo1344.c b/src/Hacl_Frodo1344.c index a565a85b..9fe78471 100644 --- a/src/Hacl_Frodo1344.c +++ b/src/Hacl_Frodo1344.c @@ -45,7 +45,7 @@ uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 32U; uint8_t *z = coins + 64U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake256(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 21552U; uint16_t s_matrix[10752U] = { 0U }; @@ -54,8 +54,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[33U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43008U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 43008U, shake_input_seed_se, 33U); + Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r + 21504U, e_matrix); uint16_t b_matrix[10752U] = { 0U }; @@ -66,14 +66,14 @@ uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(1344U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(1344U, 8U, 16U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(1344U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t, void *); uint32_t slen1 = 43056U; uint8_t *sk_p = sk; memcpy(sk_p, s, 32U * sizeof (uint8_t)); memcpy(sk_p + 32U, pk, 21520U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, sk + slen1); - Lib_Memzero0_memzero(coins, 80U, uint8_t); + Hacl_Hash_SHA3_shake256(sk + slen1, 32U, pk, 21520U); + Lib_Memzero0_memzero(coins, 80U, uint8_t, void *); return 0U; } @@ -83,9 +83,9 @@ uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(32U, coins); uint8_t seed_se_k[64U] = { 0U }; uint8_t pkh_mu[64U] = { 0U }; - Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, pkh_mu); + Hacl_Hash_SHA3_shake256(pkh_mu, 32U, pk, 21520U); memcpy(pkh_mu + 32U, coins, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(64U, pkh_mu, 64U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 64U, pkh_mu, 64U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 32U; uint8_t *seed_a = pk; @@ -97,8 +97,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[33U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 43136U, shake_input_seed_se, 33U); + Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix); 
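/* Note on the rewritten SHAKE calls above: the new one-shot entry points take
   the output first, i.e. Hacl_Hash_SHA3_shake256(output, outputByteLen, input,
   inputByteLen), whereas the removed Hacl_Hash_SHA3_shake256_hacl took
   (inputByteLen, input, outputByteLen, output). A minimal sketch of the new
   convention (hypothetical buffers, assuming Hacl_Hash_SHA3.h is included):

     uint8_t seed[32U] = { 0U };
     uint8_t out[64U] = { 0U };
     Hacl_Hash_SHA3_shake256(out, 64U, seed, 32U);

   Hacl_Hash_SHA3_shake128 follows the same (out, outLen, in, inLen) order. */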
Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix); @@ -119,22 +119,22 @@ uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 21664U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t shake_input_ss[ss_init_len]; memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 21632U * sizeof (uint8_t)); memcpy(shake_input_ss + 21632U, k, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 32U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t); - Lib_Memzero0_memzero(coins, 32U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 32U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 32U, uint8_t, void *); return 0U; } @@ -154,8 +154,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 1344U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 4U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[64U] = { 0U }; uint32_t pkh_mu_decode_len = 64U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -164,7 +164,7 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 43056U; memcpy(pkh_mu_decode, pkh, 32U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 32U, mu_decode, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 64U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 64U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 32U; uint8_t *s = sk; @@ -177,8 +177,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[33U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 43136U, shake_input_seed_se, 33U); + Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix); 
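/* The Lib_Memzero0_memzero call sites in this patch gain a fourth argument.
   A plausible reading of the updated macro in lib_memzero0.h (also touched by
   this patch) is that the trailing argument, the erased return type of the
   extracted F* call, is simply discarded:

     #define Lib_Memzero0_memzero(dst, len, t, _ret_t) \
       Lib_Memzero0_memzero0((dst), (len) * sizeof (t))

   so each former three-argument call becomes a four-argument call with
   `void *` appended and unchanged behavior. This definition is an assumption
   inferred from the call sites, not a quote of the actual header. */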
Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix); @@ -197,12 +197,12 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 1344U, 16U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 1344U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -222,11 +222,11 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 21632U * sizeof (uint8_t)); memcpy(ss_init + 21632U, kp_s, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 32U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 32U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 32U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 32U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 32U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 32U, uint8_t, void *); return 0U; } diff --git a/src/Hacl_Frodo64.c b/src/Hacl_Frodo64.c index 91434038..19f1562d 100644 --- a/src/Hacl_Frodo64.c +++ b/src/Hacl_Frodo64.c @@ -50,7 +50,7 @@ uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 16U; uint8_t *z = coins + 32U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake128(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 992U; uint16_t s_matrix[512U] = { 0U }; @@ -59,8 +59,8 @@ uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2048U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 2048U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r + 1024U, e_matrix); uint16_t b_matrix[512U] = { 0U }; @@ -70,14 +70,14 @@ uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(64U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(64U, 8U, 15U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(64U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 512U, uint16_t); + 
Lib_Memzero0_memzero(s_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 512U, uint16_t, void *); uint32_t slen1 = 2016U; uint8_t *sk_p = sk; memcpy(sk_p, s, 16U * sizeof (uint8_t)); memcpy(sk_p + 16U, pk, 976U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, sk + slen1); - Lib_Memzero0_memzero(coins, 48U, uint8_t); + Hacl_Hash_SHA3_shake128(sk + slen1, 16U, pk, 976U); + Lib_Memzero0_memzero(coins, 48U, uint8_t, void *); return 0U; } @@ -87,9 +87,9 @@ uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(16U, coins); uint8_t seed_se_k[32U] = { 0U }; uint8_t pkh_mu[32U] = { 0U }; - Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, pkh_mu); + Hacl_Hash_SHA3_shake128(pkh_mu, 16U, pk, 976U); memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu, 32U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 16U; uint8_t *seed_a = pk; @@ -101,8 +101,8 @@ uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 2176U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix); @@ -122,22 +122,22 @@ uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 1096U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t shake_input_ss[ss_init_len]; memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 1080U * sizeof (uint8_t)); memcpy(shake_input_ss + 1080U, k, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(coins, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 16U, uint8_t, void *); return 0U; } @@ -157,8 +157,8 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 64U, 8U, bp_matrix, s_matrix, m_matrix); 
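/* The single-byte prefixes written into shake_input_seed_se throughout these
   KEM files are FrodoKEM's domain-separation bytes: 0x5f precedes seed_SE
   when sampling the key-generation matrices S and E, and 0x96 precedes
   seed_SE when sampling S', E' and E'' during encapsulation and
   decapsulation, matching the FrodoKEM submission's 0x5F || seed_SE and
   0x96 || seed_SE inputs. */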
Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[32U] = { 0U }; uint32_t pkh_mu_decode_len = 32U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -167,7 +167,7 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 2016U; memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 16U; uint8_t *s = sk; @@ -180,8 +180,8 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 2176U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix); @@ -199,12 +199,12 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 64U, 15U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 64U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -225,11 +225,11 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 1080U * sizeof (uint8_t)); memcpy(ss_init + 1080U, kp_s, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 16U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 16U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 16U, uint8_t, void *); return 0U; } diff --git a/src/Hacl_Frodo640.c b/src/Hacl_Frodo640.c index 8baaee46..8cf0253e 
100644 --- a/src/Hacl_Frodo640.c +++ b/src/Hacl_Frodo640.c @@ -45,7 +45,7 @@ uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 16U; uint8_t *z = coins + 32U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake128(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 9632U; uint16_t s_matrix[5120U] = { 0U }; @@ -54,8 +54,8 @@ uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20480U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 20480U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r + 10240U, e_matrix); uint16_t b_matrix[5120U] = { 0U }; @@ -66,14 +66,14 @@ uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(640U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(640U, 8U, 15U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(640U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t, void *); uint32_t slen1 = 19872U; uint8_t *sk_p = sk; memcpy(sk_p, s, 16U * sizeof (uint8_t)); memcpy(sk_p + 16U, pk, 9616U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, sk + slen1); - Lib_Memzero0_memzero(coins, 48U, uint8_t); + Hacl_Hash_SHA3_shake128(sk + slen1, 16U, pk, 9616U); + Lib_Memzero0_memzero(coins, 48U, uint8_t, void *); return 0U; } @@ -83,9 +83,9 @@ uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(16U, coins); uint8_t seed_se_k[32U] = { 0U }; uint8_t pkh_mu[32U] = { 0U }; - Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, pkh_mu); + Hacl_Hash_SHA3_shake128(pkh_mu, 16U, pk, 9616U); memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu, 32U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 16U; uint8_t *seed_a = pk; @@ -97,8 +97,8 @@ uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 20608U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix); @@ -119,22 +119,22 @@ uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + 
Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 9736U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t shake_input_ss[ss_init_len]; memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 9720U * sizeof (uint8_t)); memcpy(shake_input_ss + 9720U, k, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(coins, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 16U, uint8_t, void *); return 0U; } @@ -154,8 +154,8 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 640U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[32U] = { 0U }; uint32_t pkh_mu_decode_len = 32U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -164,7 +164,7 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 19872U; memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 16U; uint8_t *s = sk; @@ -177,8 +177,8 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 20608U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix); @@ -197,12 +197,12 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); 
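/* The comparison that follows is the Fujisaki-Okamoto re-encryption check:
   decapsulation re-derives sp/ep/epp from the decoded message, recomputes
   bpp_matrix and cp_matrix, compares them against the received ciphertext
   matrices with the constant-time Hacl_Impl_Matrix_matrix_eq, and then uses
   the resulting mask to select either the re-derived key share kp or the
   secret fallback value s from the secret key before hashing the shared
   secret. */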
Hacl_Impl_Matrix_mod_pow2(8U, 640U, 15U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 640U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -223,11 +223,11 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 9720U * sizeof (uint8_t)); memcpy(ss_init + 9720U, kp_s, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 16U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 16U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 16U, uint8_t, void *); return 0U; } diff --git a/src/Hacl_Frodo976.c b/src/Hacl_Frodo976.c index 76d78a30..9360e3af 100644 --- a/src/Hacl_Frodo976.c +++ b/src/Hacl_Frodo976.c @@ -45,7 +45,7 @@ uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 24U; uint8_t *z = coins + 48U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake256(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 15656U; uint16_t s_matrix[7808U] = { 0U }; @@ -54,8 +54,8 @@ uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[25U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31232U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 31232U, shake_input_seed_se, 25U); + Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r + 15616U, e_matrix); uint16_t b_matrix[7808U] = { 0U }; @@ -66,14 +66,14 @@ uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(976U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(976U, 8U, 16U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(976U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t, void *); uint32_t slen1 = 31272U; uint8_t *sk_p = sk; memcpy(sk_p, s, 24U * sizeof (uint8_t)); memcpy(sk_p + 24U, pk, 15632U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, sk + slen1); - Lib_Memzero0_memzero(coins, 64U, uint8_t); + Hacl_Hash_SHA3_shake256(sk + slen1, 24U, pk, 15632U); + Lib_Memzero0_memzero(coins, 64U, uint8_t, void *); return 0U; } @@ -83,9 +83,9 @@ uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, 
uint8_t *ss, uint8_t *pk) randombytes_(24U, coins); uint8_t seed_se_k[48U] = { 0U }; uint8_t pkh_mu[48U] = { 0U }; - Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, pkh_mu); + Hacl_Hash_SHA3_shake256(pkh_mu, 24U, pk, 15632U); memcpy(pkh_mu + 24U, coins, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(48U, pkh_mu, 48U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 48U, pkh_mu, 48U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 24U; uint8_t *seed_a = pk; @@ -97,8 +97,8 @@ uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[25U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 31360U, shake_input_seed_se, 25U); + Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix); @@ -119,22 +119,22 @@ uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 15768U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t shake_input_ss[ss_init_len]; memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 15744U * sizeof (uint8_t)); memcpy(shake_input_ss + 15744U, k, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 24U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t); - Lib_Memzero0_memzero(coins, 24U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 24U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 24U, uint8_t, void *); return 0U; } @@ -154,8 +154,8 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 976U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 3U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[48U] = { 0U }; uint32_t pkh_mu_decode_len = 48U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -164,7 +164,7 @@ uint32_t 
Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 31272U; memcpy(pkh_mu_decode, pkh, 24U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 24U, mu_decode, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 48U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 48U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 24U; uint8_t *s = sk; @@ -177,8 +177,8 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[25U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 31360U, shake_input_seed_se, 25U); + Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix); @@ -197,12 +197,12 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 976U, 16U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 976U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -222,11 +222,11 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 15744U * sizeof (uint8_t)); memcpy(ss_init + 15744U, kp_s, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 24U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 24U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 24U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 24U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 24U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 24U, uint8_t, void *); return 0U; } diff --git a/src/Hacl_Hash_Blake2b.c b/src/Hacl_Hash_Blake2b.c index 2dceaf4b..d490a1a5 100644 --- a/src/Hacl_Hash_Blake2b.c +++ b/src/Hacl_Hash_Blake2b.c @@ -76,22 +76,22 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl uint64_t *r1 = m_st + 4U; uint64_t *r20 = m_st + 8U; uint64_t *r30 = m_st + 12U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = 
Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; uint64_t uu____0 = m_w[s2]; uint64_t uu____1 = m_w[s4]; uint64_t uu____2 = m_w[s6]; @@ -474,18 +474,27 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 64U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + uint64_t tmp[8U] = { 0U }; uint64_t *r0 = hash; uint64_t *r1 = hash + 4U; uint64_t *r2 = hash + 8U; uint64_t *r3 = hash + 12U; - uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U]; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; r2[0U] = iv0; r2[1U] = iv1; r2[2U] = iv2; @@ -494,16 +503,141 @@ void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; - uint64_t kk_shift_8 = (uint64_t)kk << 8U; - uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ 
(kk_shift_8 ^ (uint64_t)nn)); + uint8_t kk1 = (uint8_t)kk; + uint8_t nn1 = (uint8_t)nn; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk1 + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; +} + +static void init_with_params(uint64_t *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = hash; + uint64_t *r1 = hash + 4U; + uint64_t *r2 = hash + 8U; + uint64_t *r3 = hash + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk = p.key_length; + uint8_t nn = p.digest_length; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; } static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) @@ 
-519,7 +653,7 @@ static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, ui { update_block(wv, hash, false, lb, b); } - Lib_Memzero0_memzero(b, 128U, uint8_t); + Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } void @@ -560,7 +694,7 @@ Hacl_Hash_Blake2b_update_last( FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, 128U, uint8_t); + Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } static void @@ -624,43 +758,223 @@ void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash) KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]);); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } -/** - State allocation function when there is no key -*/ -Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void) +static Hacl_Hash_Blake2b_state_t +*malloc_raw( + Hacl_Hash_Blake2b_index kk, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); - Hacl_Hash_Blake2b_block_state_t block_state = { .fst = wv, .snd = b }; + Hacl_Hash_Blake2b_block_state_t + block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + uint8_t kk10 = kk.key_length; + uint32_t ite; + if (kk10 != 0U) + { + ite = 128U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2b_state_t - s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2b_state_t *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t)); p[0U] = s; - Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U); + Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; + uint8_t kk1 = p1->key_length; + uint8_t nn = p1->digest_length; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; + init_with_params(block_state.thd.snd, pv); return p; } /** - Re-initialization function when there is no key + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_key_and_params`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B.
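As an illustration, allocation with explicit parameters might look as follows (the salt, personalization and key values are hypothetical; the field layout is that of `Hacl_Hash_Blake2b_blake2_params` used above):

    uint8_t salt[16U] = { 0U };
    uint8_t personal[16U] = { 0U };
    uint8_t key[32U] = { 0U };
    Hacl_Hash_Blake2b_blake2_params params = {
      .digest_length = 64U, .key_length = 32U, .fanout = 1U, .depth = 1U,
      .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
      .inner_length = 0U, .salt = salt, .personal = personal
    };
    Hacl_Hash_Blake2b_state_t
    *st = Hacl_Hash_Blake2b_malloc_with_params_and_key(&params, key);

Here the 32-byte key matches `.key_length = 32U`, per the first requirement, and later resets of `st` must go through `reset_with_key_and_params` with the same key and digest lengths.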
+ +*/ +Hacl_Hash_Blake2b_state_t +*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +{ + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + Hacl_Hash_Blake2b_index + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; + return + malloc_raw(i1, + ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. + */ -void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state) +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 64U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + Hacl_Hash_Blake2b_state_t *s = Hacl_Hash_Blake2b_malloc_with_params_and_key(&p0, k); + return s; +} + +/** + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. 
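A minimal keyless round trip over the streaming API might look as follows (the message buffer is hypothetical; `Hacl_Streaming_Types_Success` is the zero error code from Hacl_Streaming_Types.h):

    uint8_t msg[3U] = { 0x61U, 0x62U, 0x63U }; /* "abc" */
    uint8_t out[64U] = { 0U };
    Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
    if (Hacl_Hash_Blake2b_update(st, msg, 3U) == Hacl_Streaming_Types_Success)
    {
      Hacl_Hash_Blake2b_digest(st, out); /* 64-byte default digest */
    }
    Hacl_Hash_Blake2b_free(st);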
+*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void) +{ + return Hacl_Hash_Blake2b_malloc_with_key(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_state_t *s) +{ + Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2b_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 128U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2b_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + General-purpose re-initialization function with parameters and +key. You cannot change digest_length or key_length, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. +*/ +void +Hacl_Hash_Blake2b_reset_with_key_and_params( + Hacl_Hash_Blake2b_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. +*/ +void Hacl_Hash_Blake2b_reset_with_key(Hacl_Hash_Blake2b_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Specialized-purpose re-initialization function with no parameters +and no key. 
This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. +*/ +void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *s) +{ + Hacl_Hash_Blake2b_reset_with_key(s, NULL); +} + +/** + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len) @@ -726,8 +1040,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_update_multi(128U, wv, @@ -750,8 +1065,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_update_multi(data1_len, wv, @@ -817,8 +1133,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_update_multi(128U, wv, @@ -842,8 +1159,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_update_multi(data1_len, wv, @@ -867,10 +1185,20 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 } /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your +parameters. 
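Note also that, as the implementation below makes apparent, `digest` operates on a stack copy of the block state, so the streaming state remains valid afterwards; intermediate digests can be taken while continuing to absorb input (sketch; `st`, `more` and `more_len` stand for an existing state and further input):

    uint8_t d1[64U] = { 0U };
    uint8_t d2[64U] = { 0U };
    Hacl_Hash_Blake2b_digest(st, d1);             /* digest of input so far */
    Hacl_Hash_Blake2b_update(st, more, more_len); /* keep absorbing */
    Hacl_Hash_Blake2b_digest(st, d2);             /* digest of extended input */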
*/ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) { + Hacl_Hash_Blake2b_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2b_state_t scrut = *state; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -887,9 +1215,11 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) uint8_t *buf_1 = buf_; uint64_t wv0[16U] = { 0U }; uint64_t b[16U] = { 0U }; - Hacl_Hash_Blake2b_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - uint64_t *src_b = block_state.snd; - uint64_t *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2b_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + uint64_t *src_b = block_state.thd.snd; + uint64_t *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -903,8 +1233,9 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - uint64_t *wv1 = tmp_block_state.fst; - uint64_t *hash0 = tmp_block_state.snd; + K____uint64_t___uint64_t_ acc0 = tmp_block_state.thd; + uint64_t *wv1 = acc0.fst; + uint64_t *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2b_update_multi(0U, wv1, @@ -913,15 +1244,17 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - uint64_t *wv = tmp_block_state.fst; - uint64_t *hash = tmp_block_state.snd; + K____uint64_t___uint64_t_ acc = tmp_block_state.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; Hacl_Hash_Blake2b_update_last(r, wv, hash, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - Hacl_Hash_Blake2b_finish(64U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2b_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -932,14 +1265,43 @@ void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state) Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - uint64_t *wv = block_state.fst; - uint64_t *b = block_state.snd; + uint64_t *b = block_state.thd.snd; + uint64_t *wv = block_state.thd.fst; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. This preserves all parameters. 
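+
+For example, with `st` a live state obtained from one of the malloc functions
+(`extra` and `extra_len` are illustrative): the copy is a fresh allocation and
+must be freed separately.
+
+  Hacl_Hash_Blake2b_state_t *st2 = Hacl_Hash_Blake2b_copy(st);
+  Hacl_Hash_Blake2b_update(st2, extra, extra_len);
+  Hacl_Hash_Blake2b_free(st2);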
+*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_copy(Hacl_Hash_Blake2b_state_t *state) +{ + Hacl_Hash_Blake2b_state_t scrut = *state; + Hacl_Hash_Blake2b_block_state_t block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); + memcpy(buf, buf0, 128U * sizeof (uint8_t)); + uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); + uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); + Hacl_Hash_Blake2b_block_state_t + block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; + uint64_t *src_b = block_state0.thd.snd; + uint64_t *dst_b = block_state.thd.snd; + memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); + Hacl_Hash_Blake2b_state_t + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Hash_Blake2b_state_t + *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t)); + p[0U] = s; + return p; +} + /** Write the BLAKE2b digest of message `input` using key `key` into `output`. @@ -965,7 +1327,109 @@ Hacl_Hash_Blake2b_hash_with_key( Hacl_Hash_Blake2b_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2b_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 16U, uint64_t); - Lib_Memzero0_memzero(b, 16U, uint64_t); + Lib_Memzero0_memzero(b1, 16U, uint64_t, void *); + Lib_Memzero0_memzero(b, 16U, uint64_t, void *); +} + +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. 
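+
+A minimal sketch (parameter values are illustrative; `salt` and `personal`
+must each point to 16 bytes for BLAKE2b, and `input`/`input_len` stand for the
+caller's message):
+
+  uint8_t salt[16U] = { 0U };
+  uint8_t personal[16U] = { 0U };
+  Hacl_Hash_Blake2b_blake2_params params =
+  {
+    .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U,
+    .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
+    .inner_length = 0U, .salt = salt, .personal = personal
+  };
+  uint8_t out[32U] = { 0U };
+  Hacl_Hash_Blake2b_hash_with_key_and_paramas(out, input, input_len, params, NULL);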
+*/ +void +Hacl_Hash_Blake2b_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + uint64_t b[16U] = { 0U }; + uint64_t b1[16U] = { 0U }; + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = b; + uint64_t *r1 = b + 4U; + uint64_t *r2 = b + 8U; + uint64_t *r3 = b + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk = params.key_length; + uint8_t nn = params.digest_length; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ + ((uint64_t)params.fanout + << 16U + ^ ((uint64_t)params.depth << 24U ^ (uint64_t)params.leaf_length << 32U))); + tmp[1U] = params.node_offset; + tmp[2U] = (uint64_t)params.node_depth ^ (uint64_t)params.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2b_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 16U, uint64_t, void *); + Lib_Memzero0_memzero(b, 16U, uint64_t, void *); } diff --git a/src/Hacl_Hash_Blake2b_Simd256.c b/src/Hacl_Hash_Blake2b_Simd256.c index 1a5e8cf2..0afd93bc 100644 --- a/src/Hacl_Hash_Blake2b_Simd256.c +++ b/src/Hacl_Hash_Blake2b_Simd256.c @@ -26,6 +26,7 @@ #include "internal/Hacl_Hash_Blake2b_Simd256.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "lib_memzero0.h" static inline void @@ -77,22 +78,22 @@ update_block( Lib_IntVector_Intrinsics_vec256 *r1 = m_st + 1U; Lib_IntVector_Intrinsics_vec256 *r20 = m_st + 2U; Lib_IntVector_Intrinsics_vec256 *r30 = m_st + 3U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = 
Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); @@ -214,24 +215,147 @@ update_block( void Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 64U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = hash; + Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk1 = (uint8_t)kk; + uint8_t nn1 = (uint8_t)nn; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk1 + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = 
p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); +} + +static void +init_with_params(Lib_IntVector_Intrinsics_vec256 *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint64_t tmp[8U] = { 0U }; Lib_IntVector_Intrinsics_vec256 *r0 = hash; Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U; - uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U]; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - uint64_t kk_shift_8 = (uint64_t)kk << 8U; - uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn)); - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk = p.key_length; + uint8_t nn = p.digest_length; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); } static void 
@@ -254,7 +378,7 @@ update_key(
 {
 update_block(wv, hash, false, lb, b);
 }
- Lib_Memzero0_memzero(b, 128U, uint8_t);
+ Lib_Memzero0_memzero(b, 128U, uint8_t, void *);
 }
 
 void
@@ -295,7 +419,7 @@ Hacl_Hash_Blake2b_Simd256_update_last(
 FStar_UInt128_uint128
 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
 update_block(wv, hash, true, totlen, b);
- Lib_Memzero0_memzero(b, 128U, uint8_t);
+ Lib_Memzero0_memzero(b, 128U, uint8_t, void *);
 }
 
 static inline void
@@ -371,7 +495,7 @@ Hacl_Hash_Blake2b_Simd256_finish(
 Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
 uint8_t *final = b;
 memcpy(output, final, nn * sizeof (uint8_t));
- Lib_Memzero0_memzero(b, 64U, uint8_t);
+ Lib_Memzero0_memzero(b, 64U, uint8_t, void *);
 }
 
 void
@@ -468,10 +592,11 @@ Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void)
 return buf;
 }
 
-/**
- State allocation function when there is no key
-*/
-Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
+static Hacl_Hash_Blake2b_Simd256_state_t
+*malloc_raw(
+ Hacl_Hash_Blake2b_index kk,
+ K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key
+)
 {
 uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
 Lib_IntVector_Intrinsics_vec256
@@ -484,33 +609,199 @@ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
 (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
 sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
 memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
- Hacl_Hash_Blake2b_Simd256_block_state_t block_state = { .fst = wv, .snd = b };
+ Hacl_Hash_Blake2b_Simd256_block_state_t
+ block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } };
+ uint8_t kk10 = kk.key_length;
+ uint32_t ite;
+ if (kk10 != 0U)
+ {
+ ite = 128U;
+ }
+ else
+ {
+ ite = 0U;
+ }
 Hacl_Hash_Blake2b_Simd256_state_t
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite };
 Hacl_Hash_Blake2b_Simd256_state_t
 *p = (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof (
 Hacl_Hash_Blake2b_Simd256_state_t
 ));
 p[0U] = s;
- Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+ Hacl_Hash_Blake2b_blake2_params *p1 = key.fst;
+ uint8_t kk1 = p1->key_length;
+ uint8_t nn = p1->digest_length;
+ Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn };
+ uint32_t kk2 = (uint32_t)i.key_length;
+ uint8_t *k_1 = key.snd;
+ if (!(kk2 == 0U))
+ {
+ uint8_t *sub_b = buf + kk2;
+ memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t));
+ memcpy(buf, k_1, kk2 * sizeof (uint8_t));
+ }
+ Hacl_Hash_Blake2b_blake2_params pv = p1[0U];
+ init_with_params(block_state.thd.snd, pv);
 return p;
 }
 
 /**
- State allocation function when there is no key
+ State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
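+
+A minimal allocation sketch (assumes `p` and `k` already satisfy the
+preconditions above, i.e. `k` holds `p->key_length` bytes):
+
+  Hacl_Hash_Blake2b_Simd256_state_t *st =
+    Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(p, k);
+  // ...stream data with Hacl_Hash_Blake2b_Simd256_update, extract the tag
+  // with Hacl_Hash_Blake2b_Simd256_digest, then release:
+  Hacl_Hash_Blake2b_Simd256_free(st);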
+*/
+Hacl_Hash_Blake2b_Simd256_state_t
+*Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(
+ Hacl_Hash_Blake2b_blake2_params *p,
+ uint8_t *k
+)
+{
+ Hacl_Hash_Blake2b_blake2_params pv = p[0U];
+ Hacl_Hash_Blake2b_index
+ i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length };
+ return
+ malloc_raw(i1,
+ ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k }));
+}
+
+/**
+ State allocation function when there is just a custom key. All
+other parameters are set to their respective default values, meaning the output
+length is the maximum allowed output (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2b_Simd256_state_t
+*Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk)
+{
+ uint8_t nn = 64U;
+ Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn };
+ uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t));
+ uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t));
+ Hacl_Hash_Blake2b_blake2_params
+ p =
+ {
+ .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U,
+ .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt,
+ .personal = personal
+ };
+ Hacl_Hash_Blake2b_blake2_params
+ *p0 =
+ (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params));
+ p0[0U] = p;
+ Hacl_Hash_Blake2b_Simd256_state_t
+ *s = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(p0, k);
+ Hacl_Hash_Blake2b_blake2_params p1 = p0[0U];
+ KRML_HOST_FREE(p1.salt);
+ KRML_HOST_FREE(p1.personal);
+ KRML_HOST_FREE(p0);
+ return s;
+}
+
+/**
+ State allocation function when there is no key
 */
-void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state)
+Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
+{
+ return Hacl_Hash_Blake2b_Simd256_malloc_with_key0(NULL, 0U);
+}
+
+static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_Simd256_state_t *s)
+{
+ Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state;
+ uint8_t nn = block_state.snd;
+ uint8_t kk1 = block_state.fst;
+ return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn });
+}
+
+static void
+reset_raw(
+ Hacl_Hash_Blake2b_Simd256_state_t *state,
+ K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key
+)
 {
 Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
 uint8_t *buf = scrut.buf;
 Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
- Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+ uint8_t nn0 = block_state.snd;
+ uint8_t kk10 = block_state.fst;
+ Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 };
+ KRML_MAYBE_UNUSED_VAR(i);
+ Hacl_Hash_Blake2b_blake2_params *p = key.fst;
+ uint8_t kk1 = p->key_length;
+ uint8_t nn = p->digest_length;
+ Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn };
+ uint32_t kk2 = (uint32_t)i1.key_length;
+ uint8_t *k_1 = key.snd;
+ if (!(kk2 == 0U))
+ {
+ uint8_t *sub_b = buf + kk2;
+ memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t));
+ memcpy(buf, k_1, kk2 * sizeof (uint8_t));
+ }
+ Hacl_Hash_Blake2b_blake2_params pv = p[0U];
+ init_with_params(block_state.thd.snd, pv);
+ uint8_t kk11 = i.key_length;
+ uint32_t ite;
+ if (kk11 != 0U)
+ {
+ ite = 128U;
+ }
+ else
+ {
+ ite = 0U;
+ }
 Hacl_Hash_Blake2b_Simd256_state_t
- tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite };
state[0U] = tmp; } +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( + Hacl_Hash_Blake2b_Simd256_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Re-initialization function when there is a key. Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Re-initialization function when there is no key +*/ +void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s) +{ + Hacl_Hash_Blake2b_Simd256_reset_with_key(s, NULL); +} + /** Update function when there is no key; 0 = success, 1 = max length exceeded */ @@ -582,8 +873,10 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_Simd256_update_multi(128U, wv, @@ -606,8 +899,9 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_Simd256_update_multi(data1_len, wv, @@ -673,8 +967,10 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_Simd256_update_multi(128U, wv, @@ -698,8 +994,9 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = 
block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_Simd256_update_multi(data1_len, wv, @@ -728,6 +1025,10 @@ Hacl_Hash_Blake2b_Simd256_update( void Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output) { + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -744,9 +1045,11 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 uint8_t *buf_1 = buf_; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; - Hacl_Hash_Blake2b_Simd256_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd; - Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2b_Simd256_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -760,8 +1063,10 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc0 = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec256 *wv1 = acc0.fst; + Lib_IntVector_Intrinsics_vec256 *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2b_Simd256_update_multi(0U, wv1, @@ -770,15 +1075,18 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; Hacl_Hash_Blake2b_Simd256_update_last(r, wv, hash, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - Hacl_Hash_Blake2b_Simd256_finish(64U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2b_Simd256_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -789,14 +1097,55 @@ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state) Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst; - Lib_IntVector_Intrinsics_vec256 *b = block_state.snd; + Lib_IntVector_Intrinsics_vec256 *b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.thd.fst; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); 
KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. The key length (or absence thereof) must match between source and destination. +*/ +Hacl_Hash_Blake2b_Simd256_state_t +*Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state) +{ + Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); + memcpy(buf, buf0, 128U * sizeof (uint8_t)); + Lib_IntVector_Intrinsics_vec256 + *wv = + (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, + sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); + memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 + *b = + (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, + sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); + memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Hacl_Hash_Blake2b_Simd256_block_state_t + block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state0.thd.snd; + Lib_IntVector_Intrinsics_vec256 *dst_b = block_state.thd.snd; + memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Hacl_Hash_Blake2b_Simd256_state_t + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Hash_Blake2b_Simd256_state_t + *p = + (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof ( + Hacl_Hash_Blake2b_Simd256_state_t + )); + p[0U] = s; + return p; +} + /** Write the BLAKE2b digest of message `input` using key `key` into `output`. 
@@ -822,7 +1171,91 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( Hacl_Hash_Blake2b_Simd256_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2b_Simd256_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256); - Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256, void *); +} + +void +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U }; + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = b; + Lib_IntVector_Intrinsics_vec256 *r1 = b + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = b + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = b + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk = params.key_length; + uint8_t nn = params.digest_length; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ + ((uint64_t)params.fanout + << 16U + ^ ((uint64_t)params.depth << 24U ^ (uint64_t)params.leaf_length << 32U))); + tmp[1U] = params.node_offset; + tmp[2U] = (uint64_t)params.node_depth ^ (uint64_t)params.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2b_Simd256_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256, void *); } diff --git a/src/Hacl_Hash_Blake2s.c b/src/Hacl_Hash_Blake2s.c index 652c3f33..6e19d83d 100644 --- a/src/Hacl_Hash_Blake2s.c +++ b/src/Hacl_Hash_Blake2s.c @@ -26,6 +26,7 @@ #include "internal/Hacl_Hash_Blake2s.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include 
"internal/Hacl_Hash_Blake2b.h" #include "lib_memzero0.h" static inline void @@ -76,22 +77,22 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * uint32_t *r1 = m_st + 4U; uint32_t *r20 = m_st + 8U; uint32_t *r30 = m_st + 12U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; uint32_t uu____0 = m_w[s2]; uint32_t uu____1 = m_w[s4]; uint32_t uu____2 = m_w[s6]; @@ -474,18 +475,104 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 = hash; + uint32_t *r1 = hash + 4U; + uint32_t *r2 = hash + 8U; + uint32_t *r3 = hash + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + 
uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)(uint8_t)nn + ^ ((uint32_t)(uint8_t)kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; +} + +static void init_with_params(uint32_t *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint32_t tmp[8U] = { 0U }; uint32_t *r0 = hash; uint32_t *r1 = hash + 4U; uint32_t *r2 = hash + 8U; uint32_t *r3 = hash + 12U; - uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U]; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; r2[0U] = iv0; r2[1U] = iv1; r2[2U] = iv2; @@ -494,16 +581,58 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; - uint32_t kk_shift_8 = kk << 8U; - uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn)); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)p.digest_length + ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ 
tmp5;
+ uint32_t iv6_ = iv6 ^ tmp6;
+ uint32_t iv7_ = iv7 ^ tmp7;
 r0[0U] = iv0_;
- r0[1U] = iv1;
- r0[2U] = iv2;
- r0[3U] = iv3;
- r1[0U] = iv4;
- r1[1U] = iv5;
- r1[2U] = iv6;
- r1[3U] = iv7;
+ r0[1U] = iv1_;
+ r0[2U] = iv2_;
+ r0[3U] = iv3_;
+ r1[0U] = iv4_;
+ r1[1U] = iv5_;
+ r1[2U] = iv6_;
+ r1[3U] = iv7_;
 }
 
 static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll)
@@ -519,7 +648,7 @@ static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, ui
 {
 update_block(wv, hash, false, lb, b);
 }
- Lib_Memzero0_memzero(b, 64U, uint8_t);
+ Lib_Memzero0_memzero(b, 64U, uint8_t, void *);
 }
 
 void
@@ -556,7 +685,7 @@ Hacl_Hash_Blake2s_update_last(
 memcpy(b, last, rem * sizeof (uint8_t));
 uint64_t totlen = prev + (uint64_t)len;
 update_block(wv, hash, true, totlen, b);
- Lib_Memzero0_memzero(b, 64U, uint8_t);
+ Lib_Memzero0_memzero(b, 64U, uint8_t, void *);
 }
 
 static void
@@ -614,41 +743,203 @@ void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
 KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]););
 uint8_t *final = b;
 memcpy(output, final, nn * sizeof (uint8_t));
- Lib_Memzero0_memzero(b, 32U, uint8_t);
+ Lib_Memzero0_memzero(b, 32U, uint8_t, void *);
 }
 
-/**
- State allocation function when there is no key
-*/
-Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void)
+static Hacl_Hash_Blake2s_state_t
+*malloc_raw(
+ Hacl_Hash_Blake2b_index kk,
+ K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key
+)
 {
 uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
 uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
 uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
- Hacl_Hash_Blake2s_block_state_t block_state = { .fst = wv, .snd = b };
+ Hacl_Hash_Blake2s_block_state_t
+ block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } };
+ uint8_t kk10 = kk.key_length;
+ uint32_t ite;
+ if (kk10 != 0U)
+ {
+ ite = 64U;
+ }
+ else
+ {
+ ite = 0U;
+ }
 Hacl_Hash_Blake2s_state_t
- s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+ s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite };
 Hacl_Hash_Blake2s_state_t
 *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t));
 p[0U] = s;
- Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U);
+ Hacl_Hash_Blake2b_blake2_params *p1 = key.fst;
+ uint8_t kk1 = p1->key_length;
+ uint8_t nn = p1->digest_length;
+ Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn };
+ uint32_t kk2 = (uint32_t)i.key_length;
+ uint8_t *k_1 = key.snd;
+ if (!(kk2 == 0U))
+ {
+ uint8_t *sub_b = buf + kk2;
+ memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t));
+ memcpy(buf, k_1, kk2 * sizeof (uint8_t));
+ }
+ Hacl_Hash_Blake2b_blake2_params pv = p1[0U];
+ init_with_params(block_state.thd.snd, pv);
 return p;
 }
 
 /**
- Re-initialization function when there is no key
+ State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
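+
+A minimal allocation sketch (assumes `p` and `k` satisfy the preconditions
+above):
+
+  Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc_with_params_and_key(p, k);
+  // ...stream data with Hacl_Hash_Blake2s_update / Hacl_Hash_Blake2s_digest,
+  // then release:
+  Hacl_Hash_Blake2s_free(st);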
+*/ +Hacl_Hash_Blake2s_state_t +*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +{ + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + Hacl_Hash_Blake2b_index + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; + return + malloc_raw(i1, + ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + State allocation function when there is just a custom key. All +other parameters are set to their respective default values, meaning the output +length is the maximum allowed output (32 for S, 64 for B). */ -void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state) +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 32U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params + *p0 = + (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); + p0[0U] = p; + Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_params_and_key(p0, k); + Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; + KRML_HOST_FREE(p1.salt); + KRML_HOST_FREE(p1.personal); + KRML_HOST_FREE(p0); + return s; +} + +/** + State allocation function when there is no key +*/ +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) +{ + return Hacl_Hash_Blake2s_malloc_with_key(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_state_t *s) +{ + Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2s_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2s_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 64U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2s_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } +/** + Re-initialization function. 
The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2s_reset_with_key_and_params( + Hacl_Hash_Blake2s_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Re-initialization function when there is a key. Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Re-initialization function when there is no key +*/ +void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s) +{ + Hacl_Hash_Blake2s_reset_with_key(s, NULL); +} + /** Update function when there is no key; 0 = success, 1 = max length exceeded */ @@ -716,8 +1007,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -735,8 +1027,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -797,8 +1090,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -817,8 +1111,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -841,6 +1136,10 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 */ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t 
*state, uint8_t *output) { + Hacl_Hash_Blake2s_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2s_state_t scrut = *state; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -857,9 +1156,11 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) uint8_t *buf_1 = buf_; uint32_t wv0[16U] = { 0U }; uint32_t b[16U] = { 0U }; - Hacl_Hash_Blake2s_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - uint32_t *src_b = block_state.snd; - uint32_t *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2s_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + uint32_t *src_b = block_state.thd.snd; + uint32_t *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -873,15 +1174,18 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - uint32_t *wv1 = tmp_block_state.fst; - uint32_t *hash0 = tmp_block_state.snd; + K____uint32_t___uint32_t_ acc0 = tmp_block_state.thd; + uint32_t *wv1 = acc0.fst; + uint32_t *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - uint32_t *wv = tmp_block_state.fst; - uint32_t *hash = tmp_block_state.snd; + K____uint32_t___uint32_t_ acc = tmp_block_state.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; Hacl_Hash_Blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last); - Hacl_Hash_Blake2s_finish(32U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2s_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -892,19 +1196,48 @@ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state) Hacl_Hash_Blake2s_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - uint32_t *wv = block_state.fst; - uint32_t *b = block_state.snd; + uint32_t *b = block_state.thd.snd; + uint32_t *wv = block_state.thd.fst; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. The key length (or absence thereof) must match between source and destination. 
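+
+For example, with `st` a live Blake2s state: the returned copy is
+independently allocated, carries the same parameters and buffered input, and
+must be freed on its own.
+
+  Hacl_Hash_Blake2s_state_t *st2 = Hacl_Hash_Blake2s_copy(st);
+  Hacl_Hash_Blake2s_free(st2);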
+*/
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state)
+{
+ Hacl_Hash_Blake2s_state_t scrut = *state;
+ Hacl_Hash_Blake2s_block_state_t block_state0 = scrut.block_state;
+ uint8_t *buf0 = scrut.buf;
+ uint64_t total_len0 = scrut.total_len;
+ uint8_t nn = block_state0.snd;
+ uint8_t kk1 = block_state0.fst;
+ Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn };
+ uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+ memcpy(buf, buf0, 64U * sizeof (uint8_t));
+ uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+ uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+ Hacl_Hash_Blake2s_block_state_t
+ block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } };
+ uint32_t *src_b = block_state0.thd.snd;
+ uint32_t *dst_b = block_state.thd.snd;
+ memcpy(dst_b, src_b, 16U * sizeof (uint32_t));
+ Hacl_Hash_Blake2s_state_t
+ s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+ Hacl_Hash_Blake2s_state_t
+ *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t));
+ p[0U] = s;
+ return p;
+}
+
 /**
 Write the BLAKE2s digest of message `input` using key `key` into `output`.
 
 @param output Pointer to `output_len` bytes of memory where the digest is written to.
 @param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
 @param input Pointer to `input_len` bytes of memory where the input message is read from.
 @param input_len Length of the input message.
 @param key Pointer to `key_len` bytes of memory where the key is read from.
@@ -925,7 +1258,100 @@
 Hacl_Hash_Blake2s_hash_with_key(
 Hacl_Hash_Blake2s_init(b, key_len, output_len);
 update(b1, b, key_len, key, input_len, input);
 Hacl_Hash_Blake2s_finish(output_len, output, b);
- Lib_Memzero0_memzero(b1, 16U, uint32_t);
- Lib_Memzero0_memzero(b, 16U, uint32_t);
+ Lib_Memzero0_memzero(b1, 16U, uint32_t, void *);
+ Lib_Memzero0_memzero(b, 16U, uint32_t, void *);
+}
+
+void
+Hacl_Hash_Blake2s_hash_with_key_and_paramas(
+ uint8_t *output,
+ uint8_t *input,
+ uint32_t input_len,
+ Hacl_Hash_Blake2b_blake2_params params,
+ uint8_t *key
+)
+{
+ uint32_t b[16U] = { 0U };
+ uint32_t b1[16U] = { 0U };
+ uint32_t tmp[8U] = { 0U };
+ uint32_t *r0 = b;
+ uint32_t *r1 = b + 4U;
+ uint32_t *r2 = b + 8U;
+ uint32_t *r3 = b + 12U;
+ uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U];
+ uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U];
+ uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U];
+ uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U];
+ uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U];
+ uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U];
+ uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U];
+ uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U];
+ r2[0U] = iv0;
+ r2[1U] = iv1;
+ r2[2U] = iv2;
+ r2[3U] = iv3;
+ r3[0U] = iv4;
+ r3[1U] = iv5;
+ r3[2U] = iv6;
+ r3[3U] = iv7;
+ KRML_MAYBE_FOR2(i,
+ 0U,
+ 2U,
+ 1U,
+ uint32_t *os = tmp + 4U;
+ uint8_t *bj = params.salt + i * 4U;
+ uint32_t u = load32_le(bj);
+ uint32_t r = u;
+ uint32_t x = r;
+ os[i] = x;);
+ KRML_MAYBE_FOR2(i,
+ 0U,
+ 2U,
+ 1U,
+ uint32_t *os = tmp + 6U;
+ uint8_t *bj = params.personal + i * 4U;
+ uint32_t u = load32_le(bj);
+ uint32_t r = u;
+ uint32_t x = r;
+ os[i] = x;);
+ tmp[0U] =
+ (uint32_t)params.digest_length
+ ^
+ ((uint32_t)params.key_length
+ << 8U
+ ^ ((uint32_t)params.fanout << 16U ^ (uint32_t)params.depth << 24U));
+ tmp[1U] =
params.leaf_length; + tmp[2U] = (uint32_t)params.node_offset; + tmp[3U] = + (uint32_t)(params.node_offset >> 32U) + ^ ((uint32_t)params.node_depth << 16U ^ (uint32_t)params.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2s_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 16U, uint32_t, void *); + Lib_Memzero0_memzero(b, 16U, uint32_t, void *); } diff --git a/src/Hacl_Hash_Blake2s_Simd128.c b/src/Hacl_Hash_Blake2s_Simd128.c index 73f0cccb..c02da8fa 100644 --- a/src/Hacl_Hash_Blake2s_Simd128.c +++ b/src/Hacl_Hash_Blake2s_Simd128.c @@ -26,6 +26,7 @@ #include "internal/Hacl_Hash_Blake2s_Simd128.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "lib_memzero0.h" static inline void @@ -77,22 +78,22 @@ update_block( Lib_IntVector_Intrinsics_vec128 *r1 = m_st + 1U; Lib_IntVector_Intrinsics_vec128 *r20 = m_st + 2U; Lib_IntVector_Intrinsics_vec128 *r30 = m_st + 3U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + 
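/* the sigma entries (now taken from the table shared with BLAKE2b) select which message words feed this round's G mixing functions */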
uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); @@ -214,24 +215,141 @@ update_block( void Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = hash; + Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)(uint8_t)nn + ^ ((uint32_t)(uint8_t)kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); +} + +static void +init_with_params(Lib_IntVector_Intrinsics_vec128 *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint32_t tmp[8U] = { 0U }; Lib_IntVector_Intrinsics_vec128 *r0 = hash; Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; - uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U]; + 
uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - uint32_t kk_shift_8 = kk << 8U; - uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn)); - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)p.digest_length + ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); } static void @@ -254,7 +372,7 @@ update_key( { update_block(wv, hash, false, lb, b); } - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } void @@ -291,7 +409,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } static inline void @@ -367,7 +485,7 @@ Hacl_Hash_Blake2s_Simd128_finish( Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, 32U, uint8_t); + Lib_Memzero0_memzero(b, 32U, uint8_t, void *); } void @@ -464,10 +582,11 @@ Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void) return buf; } -/** - State allocation function when there is no key -*/ -Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) +static Hacl_Hash_Blake2s_Simd128_state_t +*malloc_raw( + Hacl_Hash_Blake2b_index kk, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); Lib_IntVector_Intrinsics_vec128 @@ -480,33 +599,199 @@ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); memset(b, 0U, 4U * sizeof 
(Lib_IntVector_Intrinsics_vec128)); - Hacl_Hash_Blake2s_Simd128_block_state_t block_state = { .fst = wv, .snd = b }; + Hacl_Hash_Blake2s_Simd128_block_state_t + block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + uint8_t kk10 = kk.key_length; + uint32_t ite; + if (kk10 != 0U) + { + ite = 64U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2s_Simd128_state_t - s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2s_Simd128_state_t *p = (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof ( Hacl_Hash_Blake2s_Simd128_state_t )); p[0U] = s; - Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U); + Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; + uint8_t kk1 = p1->key_length; + uint8_t nn = p1->digest_length; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; + init_with_params(block_state.thd.snd, pv); return p; } /** - Re-initialization function when there is no key + State allocation function when there are parameters and a key. The +length of the key k MUST match the value of the field key_length in the +parameters. Furthermore, there is a static (not dynamically checked) requirement +that key_length does not exceed max_key (128 for S, 64 for B).) +*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + Hacl_Hash_Blake2b_index + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; + return + malloc_raw(i1, + ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + State allocation function when there is just a custom key. All +other parameters are set to their respective default values, meaning the output +length is the maximum allowed output (128 for S, 64 for B). 
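+(Concretely, the code below fixes the digest length nn at 32 bytes, the BLAKE2s maximum.)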
+*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 32U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params + *p0 = + (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); + p0[0U] = p; + Hacl_Hash_Blake2s_Simd128_state_t + *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(p0, k); + Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; + KRML_HOST_FREE(p1.salt); + KRML_HOST_FREE(p1.personal); + KRML_HOST_FREE(p0); + return s; +} + +/** + State allocation function when there is no key */ -void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state) +Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) +{ + return Hacl_Hash_Blake2s_Simd128_malloc_with_key0(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_Simd128_state_t *s) +{ + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2s_Simd128_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 64U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2s_Simd128_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( + Hacl_Hash_Blake2s_Simd128_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Re-initialization function when there is a key. 
Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Re-initialization function when there is no key +*/ +void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s) +{ + Hacl_Hash_Blake2s_Simd128_reset_with_key(s, NULL); +} + /** Update function when there is no key; 0 = success, 1 = max length exceeded */ @@ -578,8 +863,10 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -597,8 +884,9 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -659,8 +947,10 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -679,8 +969,9 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -704,6 +995,10 @@ Hacl_Hash_Blake2s_Simd128_update( void Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output) { + 
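/* recover the (key_length, digest_length) index stored in the block state; the digest length, rather than a hardcoded 32, decides how many bytes finish() writes at the end of this function */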
Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -720,9 +1015,11 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 uint8_t *buf_1 = buf_; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Hash_Blake2s_Simd128_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd; - Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2s_Simd128_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -736,15 +1033,20 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc0 = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec128 *wv1 = acc0.fst; + Lib_IntVector_Intrinsics_vec128 *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2s_Simd128_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, prev_len_last, r, buf_last); - Hacl_Hash_Blake2s_Simd128_finish(32U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2s_Simd128_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -755,19 +1057,60 @@ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state) Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst; - Lib_IntVector_Intrinsics_vec128 *b = block_state.snd; + Lib_IntVector_Intrinsics_vec128 *b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.thd.fst; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. The key length (or absence thereof) must match between source and destination. 
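+Both the 64-byte block buffer and the aligned vector state are duplicated, so the clone is fully independent of the source state.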
+*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state) +{ + Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; + Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); + memcpy(buf, buf0, 64U * sizeof (uint8_t)); + Lib_IntVector_Intrinsics_vec128 + *wv = + (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, + sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); + memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); + Lib_IntVector_Intrinsics_vec128 + *b = + (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, + sizeof (Lib_IntVector_Intrinsics_vec128) * 4U); + memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); + Hacl_Hash_Blake2s_Simd128_block_state_t + block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state0.thd.snd; + Lib_IntVector_Intrinsics_vec128 *dst_b = block_state.thd.snd; + memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); + Hacl_Hash_Blake2s_Simd128_state_t + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Hash_Blake2s_Simd128_state_t + *p = + (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof ( + Hacl_Hash_Blake2s_Simd128_state_t + )); + p[0U] = s; + return p; +} + /** Write the BLAKE2s digest of message `input` using key `key` into `output`. @param output Pointer to `output_len` bytes of memory where the digest is written to. -@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32. +@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64. @param input Pointer to `input_len` bytes of memory where the input message is read from. @param input_len Length of the input message. @param key Pointer to `key_len` bytes of memory where the key is read from. 
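
As a usage aside (not part of the diff): a minimal sketch of the one-shot and streaming keyed entry points touched above, assuming the conventional HACL* header name Hacl_Hash_Blake2s_Simd128.h and a vec128-enabled build. Since BLAKE2s digests are at most 32 bytes, the sketch requests the full 32-byte output.

#include <stdio.h>
#include <string.h>
#include "Hacl_Hash_Blake2s_Simd128.h"

int main(void)
{
  uint8_t key[16U] = { 1U };            /* illustrative 16-byte key */
  uint8_t msg[3U] = { 'a', 'b', 'c' };
  uint8_t d1[32U] = { 0U };
  uint8_t d2[32U] = { 0U };

  /* One-shot keyed hash: output, output_len, input, input_len, key, key_len. */
  Hacl_Hash_Blake2s_Simd128_hash_with_key(d1, 32U, msg, 3U, key, 16U);

  /* Streaming equivalent: allocate with the same key, feed the message,
     extract the digest, release the state. update returns 0 on success. */
  Hacl_Hash_Blake2s_Simd128_state_t
  *st = Hacl_Hash_Blake2s_Simd128_malloc_with_key0(key, 16U);
  (void)Hacl_Hash_Blake2s_Simd128_update(st, msg, 3U);
  Hacl_Hash_Blake2s_Simd128_digest(st, d2);
  Hacl_Hash_Blake2s_Simd128_free(st);

  printf("digests agree: %d\n", memcmp(d1, d2, 32U) == 0);
  return 0;
}

Both paths agree because malloc_with_key0 pins the digest length at the 32-byte BLAKE2s maximum, matching the output_len passed to the one-shot call.
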
@@ -788,7 +1131,88 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( Hacl_Hash_Blake2s_Simd128_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2s_Simd128_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128); - Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); +} + +void +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; + KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U }; + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = b; + Lib_IntVector_Intrinsics_vec128 *r1 = b + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = b + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = b + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)params.digest_length + ^ + ((uint32_t)params.key_length + << 8U + ^ ((uint32_t)params.fanout << 16U ^ (uint32_t)params.depth << 24U)); + tmp[1U] = params.leaf_length; + tmp[2U] = (uint32_t)params.node_offset; + tmp[3U] = + (uint32_t)(params.node_offset >> 32U) + ^ ((uint32_t)params.node_depth << 16U ^ (uint32_t)params.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2s_Simd128_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); } diff --git a/src/Hacl_Hash_SHA3.c b/src/Hacl_Hash_SHA3.c index 4f502866..89bb0491 100644 --- a/src/Hacl_Hash_SHA3.c +++ b/src/Hacl_Hash_SHA3.c @@ -25,6 +25,151 @@ #include "internal/Hacl_Hash_SHA3.h" +const +uint32_t +Hacl_Hash_SHA3_keccak_rotc[24U] = + { + 1U, 3U, 6U, 10U, 15U, 21U, 
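/* rho-step rotation offsets, listed in the pi-walk order used by the permutation loops below */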
28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U, + 39U, 61U, 20U, 44U + }; + +const +uint32_t +Hacl_Hash_SHA3_keccak_piln[24U] = + { + 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U, + 22U, 9U, 6U, 1U + }; + +const +uint64_t +Hacl_Hash_SHA3_keccak_rndc[24U] = + { + 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL + }; + +static void absorb_inner_32(uint8_t *b, uint64_t *s) +{ + uint64_t ws[32U] = { 0U }; + uint8_t *b1 = b; + uint64_t u = load64_le(b1); + ws[0U] = u; + uint64_t u0 = load64_le(b1 + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b1 + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b1 + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b1 + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b1 + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b1 + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b1 + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b1 + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b1 + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b1 + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b1 + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b1 + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b1 + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b1 + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b1 + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b1 + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b1 + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b1 + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b1 + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b1 + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b1 + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b1 + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b1 + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b1 + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b1 + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b1 + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b1 + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b1 + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b1 + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b1 + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b1 + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + 
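/* chi step: within each 5-lane row, lane x becomes x ^ (~(x+1) & (x+2)); iota then folds the round constant into lane 0 */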
KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } +} + static uint32_t block_len(Spec_Hash_Definitions_hash_alg a) { switch (a) @@ -97,10 +242,17 @@ Hacl_Hash_SHA3_update_multi_sha3( uint32_t n_blocks ) { - for (uint32_t i = 0U; i < n_blocks; i++) + uint32_t l = block_len(a) * n_blocks; + for (uint32_t i = 0U; i < l / block_len(a); i++) { - uint8_t *block = blocks + i * block_len(a); - Hacl_Hash_SHA3_absorb_inner(block_len(a), block, s); + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = blocks; + uint8_t *bl0 = b_; + uint8_t *uu____0 = b0 + i * block_len(a); + memcpy(bl0, uu____0, block_len(a) * sizeof (uint8_t)); + block_len(a); + absorb_inner_32(b_, s); } } @@ -124,37 +276,272 @@ Hacl_Hash_SHA3_update_last_sha3( uint32_t len = block_len(a); if (input_len == len) { - Hacl_Hash_SHA3_absorb_inner(len, input, s); - uint8_t lastBlock_[200U] = { 0U }; - uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t)); - lastBlock[0U] = suffix; - Hacl_Hash_SHA3_loadState(len, lastBlock, s); - if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U) - { - Hacl_Hash_SHA3_state_permute(s); - } - uint8_t nextBlock_[200U] = { 0U }; - uint8_t *nextBlock = nextBlock_; - nextBlock[len - 1U] = 0x80U; - Hacl_Hash_SHA3_loadState(len, nextBlock, s); - Hacl_Hash_SHA3_state_permute(s); + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b00 = input; + uint8_t *bl00 = b_; + memcpy(bl00, b00 + 0U * len, len * sizeof (uint8_t)); + absorb_inner_32(b_, s); + uint8_t b2[256U] = { 0U }; + uint8_t *b_0 = b2; + uint32_t rem = 0U % len; + uint8_t *b01 = input + input_len; + uint8_t *bl0 = b_0; + memcpy(bl0, b01 + 0U - rem, rem * sizeof (uint8_t)); + uint8_t *b02 = b_0; + b02[0U % len] = suffix; + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_0; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = 
load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U % len == len - 1U) + { + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[len - 1U] = 0x80U; + absorb_inner_32(b4, s); return; } - uint8_t lastBlock_[200U] = { 0U }; - uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, input, input_len * sizeof (uint8_t)); - lastBlock[input_len] = suffix; - Hacl_Hash_SHA3_loadState(len, lastBlock, s); - if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U) + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = input_len % len; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + input_len - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[input_len % len] = suffix; + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; 
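+    /* loads continue through ws[31]; only ws[0..24] are XORed into the state below */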
+ uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) { - Hacl_Hash_SHA3_state_permute(s); + s[i] = s[i] ^ ws[i]; + } + if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len % len == len - 1U) + { + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } } - uint8_t nextBlock_[200U] = { 0U }; - uint8_t *nextBlock = nextBlock_; - nextBlock[len - 1U] = 0x80U; - Hacl_Hash_SHA3_loadState(len, nextBlock, s); - Hacl_Hash_SHA3_state_permute(s); + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[len - 1U] = 0x80U; + absorb_inner_32(b3, s); } typedef struct hash_buf2_s @@ -463,10 +850,139 @@ digest_( uint64_t *s = tmp_block_state.snd; if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256) { - Hacl_Hash_SHA3_squeeze0(s, block_len(a11), l, output); + for (uint32_t i0 = 0U; i0 < l / block_len(a11); i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b0 = output; + uint8_t *uu____0 = hbuf; + memcpy(b0 + i0 * block_len(a11), uu____0, block_len(a11) * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____1 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____1 << 1U | uu____1 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U 
* i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r1 = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____2 = current; + s[_Y] = uu____2 << r1 | uu____2 >> (64U - r1); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = l % block_len(a11); + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + l - remOut, hbuf, remOut * sizeof (uint8_t)); return; } - Hacl_Hash_SHA3_squeeze0(s, block_len(a11), hash_len(a11), output); + for (uint32_t i0 = 0U; i0 < hash_len(a11) / block_len(a11); i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b0 = output; + uint8_t *uu____3 = hbuf; + memcpy(b0 + i0 * block_len(a11), uu____3, block_len(a11) * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r1 = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r1 | uu____5 >> (64U - r1); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = hash_len(a11) % block_len(a11); + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *uu____6 = hbuf; + memcpy(output + hash_len(a11) - remOut, uu____6, remOut * sizeof (uint8_t)); } Hacl_Streaming_Types_error_code @@ -515,78 +1031,79 @@ bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s) return uu____0 == Spec_Hash_Definitions_Shake128 || uu____0 == Spec_Hash_Definitions_Shake256; } -void 
-Hacl_Hash_SHA3_shake128_hacl( - uint32_t inputByteLen, - uint8_t *input, - uint32_t outputByteLen, - uint8_t *output -) -{ - Hacl_Hash_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, output); -} - -void -Hacl_Hash_SHA3_shake256_hacl( - uint32_t inputByteLen, - uint8_t *input, - uint32_t outputByteLen, - uint8_t *output -) -{ - Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output); -} - -void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(1152U, 448U, input_len, input, 0x06U, 28U, output); -} - -void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(1088U, 512U, input_len, input, 0x06U, 32U, output); -} - -void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t input_len) +void Hacl_Hash_SHA3_absorb_inner_32(uint32_t rateInBytes, uint8_t *b, uint64_t *s) { - Hacl_Hash_SHA3_keccak(832U, 768U, input_len, input, 0x06U, 48U, output); -} - -void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(576U, 1024U, input_len, input, 0x06U, 64U, output); -} - -static const -uint32_t -keccak_rotc[24U] = - { - 1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U, - 39U, 61U, 20U, 44U - }; - -static const -uint32_t -keccak_piln[24U] = - { - 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U, - 22U, 9U, 6U, 1U - }; - -static const -uint64_t -keccak_rndc[24U] = + KRML_MAYBE_UNUSED_VAR(rateInBytes); + uint64_t ws[32U] = { 0U }; + uint8_t *b1 = b; + uint64_t u = load64_le(b1); + ws[0U] = u; + uint64_t u0 = load64_le(b1 + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b1 + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b1 + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b1 + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b1 + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b1 + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b1 + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b1 + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b1 + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b1 + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b1 + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b1 + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b1 + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b1 + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b1 + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b1 + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b1 + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b1 + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b1 + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b1 + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b1 + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b1 + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b1 + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b1 + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b1 + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b1 + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b1 + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b1 + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b1 + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b1 + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b1 + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) { - 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, - 
0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL - }; - -void Hacl_Hash_SHA3_state_permute(uint64_t *s) -{ + s[i] = s[i] ^ ws[i]; + } for (uint32_t i0 = 0U; i0 < 24U; i0++) { uint64_t _C[5U] = { 0U }; @@ -606,8 +1123,8 @@ void Hacl_Hash_SHA3_state_permute(uint64_t *s) uint64_t current = x; for (uint32_t i = 0U; i < 24U; i++) { - uint32_t _Y = keccak_piln[i]; - uint32_t r = keccak_rotc[i]; + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; uint64_t uu____1 = current; s[_Y] = uu____1 << r | uu____1 >> (64U - r); @@ -627,108 +1144,1227 @@ void Hacl_Hash_SHA3_state_permute(uint64_t *s) s[2U + 5U * i] = v2; s[3U + 5U * i] = v3; s[4U + 5U * i] = v4;); - uint64_t c = keccak_rndc[i0]; + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; s[0U] = s[0U] ^ c; } } -void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s) +void +Hacl_Hash_SHA3_shake128( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) { - uint8_t block[200U] = { 0U }; - memcpy(block, input, rateInBytes * sizeof (uint8_t)); + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 168U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = 
load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { - uint64_t u = load64_le(block + i * 8U); - uint64_t x = u; - s[i] = s[i] ^ x; + s[i] = s[i] ^ ws0[i]; } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); } -static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res) +void +Hacl_Hash_SHA3_shake256( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) { - uint8_t block[200U] = { 0U }; + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % 
rateInBytes1] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { - uint64_t sj = s[i]; - store64_le(block + i * 8U, sj); + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 
5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); } - memcpy(res, block, rateInBytes * sizeof (uint8_t)); + memcpy(rb + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); } -void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s) +void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen) { - Hacl_Hash_SHA3_loadState(rateInBytes, block, s); - Hacl_Hash_SHA3_state_permute(s); + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 144U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) + { + uint8_t 
hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 28U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); } -static void -absorb( - uint64_t *s, - uint32_t rateInBytes, - uint32_t inputByteLen, - uint8_t *input, - uint8_t delimitedSuffix -) +void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); 
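+ /* The 0x06 byte written above is the SHA-3 domain-separation suffix (the SHAKE functions use 0x1F instead); the load64_le calls lift the padded final block into 64-bit little-endian lanes before it is XORed into the sponge state below. */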
+ ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 32U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 104U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + 
Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U 
* i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 48U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 72U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++) + { + uint8_t 
hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 64U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +/** +Allocate a state buffer of 200 bytes (i.e., uint64_t[25]) +*/ +uint64_t *Hacl_Hash_SHA3_state_malloc(void) +{ + uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t)); + return buf; +} + +/** +Free a state buffer +*/ +void Hacl_Hash_SHA3_state_free(uint64_t *s) { - uint32_t n_blocks = inputByteLen / rateInBytes; - uint32_t rem = inputByteLen % rateInBytes; - for (uint32_t i = 0U; i < n_blocks; i++) - { - uint8_t *block = input + i * rateInBytes; - Hacl_Hash_SHA3_absorb_inner(rateInBytes, block, s); - } - uint8_t *last = input + n_blocks * rateInBytes; - uint8_t lastBlock_[200U] = { 0U }; - uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, last, rem * sizeof (uint8_t)); - lastBlock[rem] = delimitedSuffix; - Hacl_Hash_SHA3_loadState(rateInBytes, lastBlock, s); - if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U) - { - Hacl_Hash_SHA3_state_permute(s); - } - uint8_t nextBlock_[200U] = { 0U }; - uint8_t *nextBlock = nextBlock_; - nextBlock[rateInBytes - 1U] = 0x80U; - Hacl_Hash_SHA3_loadState(rateInBytes, nextBlock, s); - Hacl_Hash_SHA3_state_permute(s); + KRML_HOST_FREE(s); } +/** +Absorb a number of input blocks and write the output state + + This function is intended to receive a hash state and input buffer. + It processes an input whose length is a multiple of 168 bytes (the + SHAKE128 block size); any additional bytes of a final partial block are ignored.
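+ + For example, a typical streaming use of this API might look as follows + (an illustrative sketch only: `msg` and `msg_len` are placeholder names, a + single 168-byte output block is assumed, and both absorb calls receive the + full buffer together with its total length): + + uint64_t *st = Hacl_Hash_SHA3_state_malloc(); + Hacl_Hash_SHA3_shake128_absorb_nblocks(st, msg, msg_len); + Hacl_Hash_SHA3_shake128_absorb_final(st, msg, msg_len); + uint8_t out[168U]; + Hacl_Hash_SHA3_shake128_squeeze_nblocks(st, out, 168U); + Hacl_Hash_SHA3_state_free(st);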
+ + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] +*/ void -Hacl_Hash_SHA3_squeeze0( - uint64_t *s, - uint32_t rateInBytes, - uint32_t outputByteLen, - uint8_t *output -) +Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t inputByteLen) { - uint32_t outBlocks = outputByteLen / rateInBytes; - uint32_t remOut = outputByteLen % rateInBytes; - uint8_t *last = output + outputByteLen - remOut; - uint8_t *blocks = output; - for (uint32_t i = 0U; i < outBlocks; i++) + for (uint32_t i = 0U; i < inputByteLen / 168U; i++) { - storeState(rateInBytes, s, blocks + i * rateInBytes); - Hacl_Hash_SHA3_state_permute(s); + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * 168U, 168U * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(168U, b_, state); } - storeState(remOut, s, last); } +/** +Absorb a final partial block of input and write the output state + + This function is intended to receive a hash state and input buffer. + It processes the trailing sequence of bytes at the end of the input buffer, + which is shorter than 168 bytes (the SHAKE128 block size); + any bytes of full blocks at the start of the input buffer are ignored. + + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] + + Note: the full size of the input buffer must be passed as `inputByteLen`, + including the number of full-block bytes at the start of the input buffer + that are ignored +*/ void -Hacl_Hash_SHA3_keccak( - uint32_t rate, - uint32_t capacity, - uint32_t inputByteLen, - uint8_t *input, - uint8_t delimitedSuffix, - uint32_t outputByteLen, - uint8_t *output +Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % 168U; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % 168U] = 0x1FU; + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U]
= u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[167U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(168U, b3, state); +} + +/** +Squeeze a hash state into the output buffer + + This function is intended to receive a hash state and output buffer. + It produces an output whose length is a multiple of 168 bytes (the + SHAKE128 block size); any additional bytes of a final partial block are ignored. + + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25]; + the state is permuted in place after each squeezed block + The argument `output` (OUT) points to `outputByteLen` bytes of valid memory, + i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_shake128_squeeze_nblocks( + uint64_t *state, + uint8_t *output, + uint32_t outputByteLen ) { - KRML_MAYBE_UNUSED_VAR(capacity); - uint32_t rateInBytes = rate / 8U; - uint64_t s[25U] = { 0U }; - absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix); - Hacl_Hash_SHA3_squeeze0(s, rateInBytes, outputByteLen, output); + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, state, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b0 = output; + memcpy(b0 + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + state[0U] = state[0U] ^ c; + } + } } diff --git a/src/Hacl_Hash_SHA3_Simd256.c b/src/Hacl_Hash_SHA3_Simd256.c new file mode 100644 index 00000000..5dfbf960 --- /dev/null +++ b/src/Hacl_Hash_SHA3_Simd256.c @@ -0,0 +1,6733 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person
obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#include "Hacl_Hash_SHA3_Simd256.h" + +#include "internal/Hacl_Hash_SHA3.h" + +void +Hacl_Hash_SHA3_Simd256_absorb_inner_256( + uint32_t rateInBytes, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec256 *s +) +{ + KRML_MAYBE_UNUSED_VAR(rateInBytes); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b2 = b.snd.snd.fst; + uint8_t *b1 = b.snd.fst; + uint8_t *b0 = b.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 
ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + 
Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + 
KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } +} + +void +Hacl_Hash_SHA3_Simd256_shake128( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + 
uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 168U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x1FU; + b12[inputByteLen % rateInBytes1] = 0x1FU; + b22[inputByteLen % rateInBytes1] = 0x1FU; + b32[inputByteLen % rateInBytes1] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 
+ 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + 
Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + 
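/* Squeeze: ws holds a copy of the state; the interleave network is applied again and the results written back in transposed order (ws[0] = ws0, ws[1] = ws4, ...), undoing the input transposition so each instance's lanes are contiguous before the store to hbuf. */ +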
Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + 
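/* hbuf is laid out as four 256-byte regions, one per output buffer; after the stores below, rateInBytes1 bytes of each region are copied to the corresponding output block. */ +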
Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 
+ v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + 
v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + 
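/* Last, partial squeeze block: the same de-interleaving as in the main loop, after which only the final remOut = outputByteLen % rateInBytes1 bytes of each region are copied out. */ +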
Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_shake256( + uint8_t 
*output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x1FU; + b12[inputByteLen % rateInBytes1] = 0x1FU; + b22[inputByteLen % rateInBytes1] = 0x1FU; + b32[inputByteLen % rateInBytes1] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + 
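/* SHAKE256 uses rate 136 = (1600 - 2*256)/8 bytes; the 0x1F domain-separation suffix was just written at offset rem of each zero-padded last block, which is loaded into ws32 here. */ +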
ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + 
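/* The suffix block was XORed into the state without an intervening permutation; absorb_inner_256 then XORs the 0x80 block and permutes, so both padding bytes take effect in the same block, as in pad10*1. */ +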
Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + 
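/* Each extracted block is followed by 24 rounds of Keccak-f[1600], computed 4-way on vec256 lanes: theta (the _C/_D parities), rho+pi (keccak_rotc rotations into the keccak_piln order), chi (the KRML_MAYBE_FOR5 nonlinear step) and iota (the keccak_rndc constant XORed into s[0]). */ +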
Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + 
Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + 
Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + 
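+    /* After the scatter below, ws[0..7] is instance 0's serialized state
+       (lanes 25..31 are zero scratch), ws[8..15] instance 1's, and so on;
+       the store64_le loop then lays hbuf out as four contiguous 256-byte
+       planes, one per output buffer, from which the tail memcpys take the
+       last remOut bytes each. */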
Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * 
sizeof (uint8_t)); + memcpy(b35 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_224( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 144U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + 
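+    /* Note: 28U / rateInBytes1 is 0 for SHA3-224 (28 < 144), so this
+       squeeze-loop body never executes; the 28-byte digest is produced
+       entirely by the remOut tail copy after the loop. */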
Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, 
v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 28U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 
= Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + 
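+  /* Same 4x4 lane transpose as in the squeeze loop above; the generated
+     code repeats it inline at every squeeze site rather than sharing a
+     helper. */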
Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + 
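+  /* remOut is 28 here, so each memcpy below writes one complete 28-byte
+     SHA3-224 digest, taken from that instance's 256-byte plane of hbuf
+     (offsets 0, 256, 512, 768). */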
Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 28U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 28U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 
28U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 
+ 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + 
Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, 
v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 32U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 
= Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + 
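+ /* Transpose the ws[12..15] group: interleave 64-bit lanes, then 128-bit halves, so each result vector collects one input's four consecutive state words. */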
Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + 
Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 32U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 32U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 
32U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_384( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 104U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 
+ 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + 
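+ /* Squeeze step: the same 64-bit/128-bit interleave transpose untangles the lane-interleaved shared state so each of the four output buffers receives its own rate-block bytes. */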
Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, 
v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 48U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 
= Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + 
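/* Tail squeeze, state words 12..15. For the 48-byte (SHA3-384) digest the
   output is shorter than the 104-byte rate, so the block-squeeze loop
   above ran zero times (48U / rateInBytes1 == 0) and remOut == 48: this
   single transpose-and-store pass emits the entire digest for all four
   inputs. */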
Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + 
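/* Once all eight 4-word groups are transposed, the writeback below lays
   ws[] out per instance (ws[0..7] = input 0, ws[8..15] = input 1, ...),
   so the store64_le loop produces one contiguous 256-byte state image per
   instance in hbuf, at offsets 0, 256, 512 and 768. */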
Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 48U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 48U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 
48U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_512( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 72U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 
+ 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + 
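/* SHA3-512 squeeze. With a 64-byte digest and the 72-byte rate set above,
   64U / rateInBytes1 == 0, so this block-squeeze loop body never executes;
   the generated code keeps it for uniformity with the generic sponge, and
   all output is produced by the remOut pass that follows the loop. */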
Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, 
v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 64U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 
= Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + 
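/* Tail squeeze for SHA3-512: remOut = 64U % 72U = 64, i.e. the whole
   64-byte digest is written here, and the destination offset
   64U - remOut in the memcpy calls below is 0 for each of the four
   output buffers. */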
Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + 
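/* Same per-instance writeback as in the 48-byte path: after the final
   group, ws[0..7], ws[8..15], ws[16..23] and ws[24..31] hold the
   transposed states of inputs 0..3, and only the first remOut (= 64)
   bytes of each 256-byte hbuf segment are copied to output0..output3. */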
Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 64U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 64U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 
64U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +/** +Allocate a quadruple state buffer (200 bytes for each of the 4 states) +*/ +Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void) +{ + Lib_IntVector_Intrinsics_vec256 + *buf = + (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, + sizeof (Lib_IntVector_Intrinsics_vec256) * 25U); + memset(buf, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + return buf; +} + +/** +Free a quadruple state buffer +*/ +void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s) +{ + KRML_ALIGNED_FREE(s); +} + +/** +Absorb a number of full blocks from 4 input buffers and update the quadruple hash state + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes an input that is a multiple of 168 bytes (the SHAKE128 block size); + any additional bytes of a final partial block in each buffer are ignored. + + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + for (uint32_t i = 0U; i < inputByteLen / 168U; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b0 = input0; + uint8_t *b1 = input1; + uint8_t *b2 = input2; + uint8_t *b3 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * 168U, 168U * sizeof (uint8_t)); + memcpy(bl1, b1 + i * 168U, 168U * sizeof (uint8_t)); + memcpy(bl2, b2 + i * 168U, 168U * sizeof (uint8_t)); + memcpy(bl3, b3 + i * 168U, 168U * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(168U, b_, state); + } +} + +/** +Absorb the final partial block of 4 input buffers and update the quadruple hash state + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes the sequence of bytes at the end of each input buffer that is + shorter than 168 bytes (the SHAKE128 block size); + any full blocks at the start of the input buffers are ignored
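+
+  A typical call sequence for this quadruple SHAKE128 API might look as follows
+  (a sketch only; `in0..in3`, `out0..out3` and `len` are illustrative names,
+  not part of this API):
+
+    Lib_IntVector_Intrinsics_vec256 *st = Hacl_Hash_SHA3_Simd256_state_malloc();
+    Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(st, in0, in1, in2, in3, len);
+    Hacl_Hash_SHA3_Simd256_shake128_absorb_final(st, in0, in1, in2, in3, len);
+    Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks(st, out0, out1, out2, out3, 168U);
+    Hacl_Hash_SHA3_Simd256_state_free(st);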
+ + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] + + Note: the full size of the input buffers must be passed as `inputByteLen`, + including the number of full-block bytes at the start of each input buffer that are ignored +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_final( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % 168U; + uint8_t *b01 = input0; + uint8_t *b11 = input1; + uint8_t *b21 = input2; + uint8_t *b31 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % 168U] = 0x1FU; + b12[inputByteLen % 168U] = 0x1FU; + b22[inputByteLen % 168U] = 0x1FU; + b32[inputByteLen % 168U] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 +
192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + 
Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + 
ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b2 = b.snd.snd.fst; + uint8_t *b1 = b.snd.fst; + uint8_t *b0 = b.fst; + b0[167U] = 0x80U; + b1[167U] = 0x80U; + b2[167U] = 0x80U; + b3[167U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(168U, b, state); +} + +/** +Squeeze a quadruple hash state to 4 output buffers + + This function is intended to receive a quadruple hash state and 4 output buffers. + It produces 4 outputs, each a multiple of 168 bytes (the SHAKE128 block size); + any remaining bytes of a final partial block for each buffer are not written. + + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 =
Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + 
Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b0 = output0; + uint8_t *b1 = output1; + uint8_t *b2 = output2; + uint8_t *b3 = output3; + memcpy(b0 + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + memcpy(b1 + i0 * 168U, hbuf + 256U, 168U * sizeof (uint8_t)); + memcpy(b2 + i0 * 168U, hbuf + 512U, 168U * sizeof (uint8_t)); + memcpy(b3 + i0 * 168U, hbuf + 768U, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v07; + state[1U + 5U * i] = v17; + state[2U + 5U * i] = v27; + state[3U + 5U * i] = v37; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } +} + diff --git a/src/Hacl_K256_ECDSA.c b/src/Hacl_K256_ECDSA.c index bbd2c615..0b72b166 100644 --- a/src/Hacl_K256_ECDSA.c +++ b/src/Hacl_K256_ECDSA.c @@ -571,10 +571,6 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f) { return true; } - if (a2 > 0xffffffffffffffffULL) - { - return false; - } if (a1 < 0x5d576e7357a4501dULL) { return true; diff --git a/src/Hacl_SHA2_Vec128.c b/src/Hacl_SHA2_Vec128.c index 02af75b1..18f9a73a 100644 --- a/src/Hacl_SHA2_Vec128.c +++ b/src/Hacl_SHA2_Vec128.c @@ -42,7 +42,10 @@ static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) +sha224_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec128 *hash +) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -295,7 +298,7 @@ sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) static inline void sha224_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -310,7 +313,7 @@ 
sha224_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update4(mb, st); } @@ -320,7 +323,7 @@ static inline void sha224_update_last4( uint64_t totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -374,13 +377,13 @@ sha224_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha224_update4(last0, hash); if (blocks > 1U) { @@ -390,7 +393,10 @@ sha224_update_last4( } static inline void -sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) +sha224_finish4( + Lib_IntVector_Intrinsics_vec128 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -485,9 +491,9 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha224_init4(st); @@ -503,7 +509,7 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update_last4(len_, rem, lb, st); sha224_finish4(st, rb); @@ -522,7 +528,10 @@ static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) +sha256_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec128 *hash +) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -775,7 +784,7 @@ sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) static inline void sha256_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -790,7 +799,7 @@ sha256_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, 
.snd = bl3 } } }; sha256_update4(mb, st); } @@ -800,7 +809,7 @@ static inline void sha256_update_last4( uint64_t totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -854,13 +863,13 @@ sha256_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha256_update4(last0, hash); if (blocks > 1U) { @@ -870,7 +879,10 @@ sha256_update_last4( } static inline void -sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) +sha256_finish4( + Lib_IntVector_Intrinsics_vec128 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -965,9 +977,9 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha256_init4(st); @@ -983,7 +995,7 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update_last4(len_, rem, lb, st); sha256_finish4(st, rb); diff --git a/src/Hacl_SHA2_Vec256.c b/src/Hacl_SHA2_Vec256.c index c34767f5..4098d4c7 100644 --- a/src/Hacl_SHA2_Vec256.c +++ b/src/Hacl_SHA2_Vec256.c @@ -1541,7 +1541,10 @@ static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) +sha384_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec256 *hash +) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -1778,7 +1781,7 @@ sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) static inline void sha384_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -1793,7 +1796,7 @@ sha384_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update4(mb, st); } @@ -1803,7 +1806,7 @@ static inline void 
sha384_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -1857,13 +1860,13 @@ sha384_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha384_update4(last0, hash); if (blocks > 1U) { @@ -1873,7 +1876,10 @@ sha384_update_last4( } static inline void -sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) +sha384_finish4( + Lib_IntVector_Intrinsics_vec256 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -1960,9 +1966,9 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha384_init4(st); @@ -1978,7 +1984,7 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update_last4(len_, rem, lb, st); sha384_finish4(st, rb); @@ -1997,7 +2003,10 @@ static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) +sha512_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec256 *hash +) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -2234,7 +2243,7 @@ sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) static inline void sha512_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -2249,7 +2258,7 @@ sha512_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update4(mb, st); } @@ -2259,7 +2268,7 @@ static inline void sha512_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -2313,13 +2322,13 @@ 
sha512_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha512_update4(last0, hash); if (blocks > 1U) { @@ -2329,7 +2338,10 @@ sha512_update_last4( } static inline void -sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) +sha512_finish4( + Lib_IntVector_Intrinsics_vec256 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -2416,9 +2428,9 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha512_init4(st); @@ -2434,7 +2446,7 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update_last4(len_, rem, lb, st); sha512_finish4(st, rb); diff --git a/src/Lib_RandomBuffer_System.c b/src/Lib_RandomBuffer_System.c index 0d7924b4..de6ef337 100644 --- a/src/Lib_RandomBuffer_System.c +++ b/src/Lib_RandomBuffer_System.c @@ -31,6 +31,7 @@ bool read_random_bytes(uint32_t len, uint8_t *buf) { #include #include #include +#include #include bool read_random_bytes(uint32_t len, uint8_t *buf) { diff --git a/src/msvc/EverCrypt_DRBG.c b/src/msvc/EverCrypt_DRBG.c index 1395f59f..c76a69cd 100644 --- a/src/msvc/EverCrypt_DRBG.c +++ b/src/msvc/EverCrypt_DRBG.c @@ -1798,8 +1798,8 @@ static void uninstantiate_sha1(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 20U, uint8_t); - Lib_Memzero0_memzero(v, 20U, uint8_t); + Lib_Memzero0_memzero(k, 20U, uint8_t, void *); + Lib_Memzero0_memzero(v, 20U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); @@ -1822,8 +1822,8 @@ static void uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 32U, uint8_t); - Lib_Memzero0_memzero(v, 32U, uint8_t); + Lib_Memzero0_memzero(k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(v, 32U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); @@ -1846,8 +1846,8 @@ static void uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 48U, uint8_t); - Lib_Memzero0_memzero(v, 48U, uint8_t); + Lib_Memzero0_memzero(k, 48U, uint8_t, void *); + 
Lib_Memzero0_memzero(v, 48U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); @@ -1870,8 +1870,8 @@ static void uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st) uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, 64U, uint8_t); - Lib_Memzero0_memzero(v, 64U, uint8_t); + Lib_Memzero0_memzero(k, 64U, uint8_t, void *); + Lib_Memzero0_memzero(v, 64U, uint8_t, void *); ctr[0U] = 0U; KRML_HOST_FREE(k); KRML_HOST_FREE(v); diff --git a/src/msvc/EverCrypt_Hash.c b/src/msvc/EverCrypt_Hash.c index 92b3c227..bfafa9be 100644 --- a/src/msvc/EverCrypt_Hash.c +++ b/src/msvc/EverCrypt_Hash.c @@ -709,25 +709,57 @@ static void finish(EverCrypt_Hash_state_s *s, uint8_t *dst) if (scrut.tag == SHA3_224_s) { uint64_t *p1 = scrut.case_SHA3_224_s; - Hacl_Hash_SHA3_squeeze0(p1, 144U, 28U, dst); + uint32_t remOut = 28U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == SHA3_256_s) { uint64_t *p1 = scrut.case_SHA3_256_s; - Hacl_Hash_SHA3_squeeze0(p1, 136U, 32U, dst); + uint32_t remOut = 32U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == SHA3_384_s) { uint64_t *p1 = scrut.case_SHA3_384_s; - Hacl_Hash_SHA3_squeeze0(p1, 104U, 48U, dst); + uint32_t remOut = 48U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == SHA3_512_s) { uint64_t *p1 = scrut.case_SHA3_512_s; - Hacl_Hash_SHA3_squeeze0(p1, 72U, 64U, dst); + uint32_t remOut = 64U; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, p1, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(dst + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); return; } if (scrut.tag == Blake2S_s) diff --git a/src/msvc/Hacl_Ed25519.c b/src/msvc/Hacl_Ed25519.c index 05d96cd0..d1f8edf2 100644 --- a/src/msvc/Hacl_Ed25519.c +++ b/src/msvc/Hacl_Ed25519.c @@ -1712,8 +1712,8 @@ static inline void secret_expand(uint8_t *expanded, uint8_t *secret) /** Compute the public key from the private key. - The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. + @param[out] public_key Points to 32 bytes of valid memory, i.e., `uint8_t[32]`. Must not overlap the memory location of `private_key`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. */ void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key) { @@ -1726,8 +1726,8 @@ void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key) /** Compute the expanded keys for an Ed25519 signature. - The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. 
+ @param[out] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`. Must not overlap the memory location of `private_key`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. If one needs to sign several messages under the same private key, it is more efficient to call `expand_keys` only once and `sign_expanded` multiple times, for each message. @@ -1744,11 +1744,10 @@ void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key) /** Create an Ed25519 signature with the (precomputed) expanded keys. - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - - The argument `expanded_keys` is obtained through `expand_keys`. + @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `expanded_keys` nor `msg`. + @param[in] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`, containing the expanded keys obtained by invoking `expand_keys`. + @param[in] msg_len Length of `msg`. + @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. If one needs to sign several messages under the same private key, it is more efficient to call `expand_keys` only once and `sign_expanded` multiple times, for each message. @@ -1783,9 +1782,10 @@ Hacl_Ed25519_sign_expanded( /** Create an Ed25519 signature. - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. + @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `private_key` nor `msg`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. + @param[in] msg_len Length of `msg`. + @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. The function first calls `expand_keys` and then invokes `sign_expanded`. @@ -1803,11 +1803,12 @@ Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, ui /** Verify an Ed25519 signature. - The function returns `true` if the signature is valid and `false` otherwise. + @param public_key Points to 32 bytes of valid memory containing the public key, i.e., `uint8_t[32]`. + @param msg_len Length of `msg`. + @param msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. + @param signature Points to 64 bytes of valid memory containing the signature, i.e., `uint8_t[64]`. - The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. + @return Returns `true` if the signature is valid and `false` otherwise. 
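+
+  A minimal usage sketch (illustrative names `priv`, `pub`, `msg`, `msg_len`;
+  not part of this header): derive the public key, sign, then verify:
+
+    uint8_t pub[32U]; uint8_t sig[64U];
+    Hacl_Ed25519_secret_to_public(pub, priv);
+    Hacl_Ed25519_sign(sig, priv, msg_len, msg);
+    bool ok = Hacl_Ed25519_verify(pub, msg_len, msg, sig);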
*/ bool Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature) diff --git a/src/msvc/Hacl_Frodo1344.c b/src/msvc/Hacl_Frodo1344.c index 61262a4c..ea380d8c 100644 --- a/src/msvc/Hacl_Frodo1344.c +++ b/src/msvc/Hacl_Frodo1344.c @@ -45,7 +45,7 @@ uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 32U; uint8_t *z = coins + 64U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake256(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 21552U; uint16_t s_matrix[10752U] = { 0U }; @@ -54,8 +54,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[33U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43008U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 43008U, shake_input_seed_se, 33U); + Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r + 21504U, e_matrix); uint16_t b_matrix[10752U] = { 0U }; @@ -66,14 +66,14 @@ uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(1344U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(1344U, 8U, 16U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(1344U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t, void *); uint32_t slen1 = 43056U; uint8_t *sk_p = sk; memcpy(sk_p, s, 32U * sizeof (uint8_t)); memcpy(sk_p + 32U, pk, 21520U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, sk + slen1); - Lib_Memzero0_memzero(coins, 80U, uint8_t); + Hacl_Hash_SHA3_shake256(sk + slen1, 32U, pk, 21520U); + Lib_Memzero0_memzero(coins, 80U, uint8_t, void *); return 0U; } @@ -83,9 +83,9 @@ uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(32U, coins); uint8_t seed_se_k[64U] = { 0U }; uint8_t pkh_mu[64U] = { 0U }; - Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, pkh_mu); + Hacl_Hash_SHA3_shake256(pkh_mu, 32U, pk, 21520U); memcpy(pkh_mu + 32U, coins, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(64U, pkh_mu, 64U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 64U, pkh_mu, 64U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 32U; uint8_t *seed_a = pk; @@ -97,8 +97,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[33U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 43136U, shake_input_seed_se, 33U); + Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix); @@ -119,22 +119,22 @@ uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t 
*pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 21664U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t)); memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 21632U * sizeof (uint8_t)); memcpy(shake_input_ss + 21632U, k, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 32U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t); - Lib_Memzero0_memzero(coins, 32U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 32U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 32U, uint8_t, void *); return 0U; } @@ -154,8 +154,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 1344U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 4U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[64U] = { 0U }; uint32_t pkh_mu_decode_len = 64U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -164,7 +164,7 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 43056U; memcpy(pkh_mu_decode, pkh, 32U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 32U, mu_decode, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 64U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 64U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 32U; uint8_t *s = sk; @@ -177,8 +177,8 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[33U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 43136U, shake_input_seed_se, 33U); + Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix); @@ -197,12 +197,12 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t 
*sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 1344U, 16U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 1344U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -222,11 +222,11 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 21632U * sizeof (uint8_t)); memcpy(ss_init + 21632U, kp_s, 32U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 32U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 32U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 32U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 32U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 32U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 32U, uint8_t, void *); return 0U; } diff --git a/src/msvc/Hacl_Frodo64.c b/src/msvc/Hacl_Frodo64.c index 392d87f9..f4a025ce 100644 --- a/src/msvc/Hacl_Frodo64.c +++ b/src/msvc/Hacl_Frodo64.c @@ -50,7 +50,7 @@ uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 16U; uint8_t *z = coins + 32U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake128(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 992U; uint16_t s_matrix[512U] = { 0U }; @@ -59,8 +59,8 @@ uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2048U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 2048U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r + 1024U, e_matrix); uint16_t b_matrix[512U] = { 0U }; @@ -70,14 +70,14 @@ uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(64U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(64U, 8U, 15U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(64U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 512U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 512U, uint16_t, void *); uint32_t slen1 = 2016U; uint8_t *sk_p = sk; memcpy(sk_p, s, 16U * sizeof (uint8_t)); memcpy(sk_p + 16U, pk, 976U * sizeof (uint8_t)); - 
Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, sk + slen1); - Lib_Memzero0_memzero(coins, 48U, uint8_t); + Hacl_Hash_SHA3_shake128(sk + slen1, 16U, pk, 976U); + Lib_Memzero0_memzero(coins, 48U, uint8_t, void *); return 0U; } @@ -87,9 +87,9 @@ uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(16U, coins); uint8_t seed_se_k[32U] = { 0U }; uint8_t pkh_mu[32U] = { 0U }; - Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, pkh_mu); + Hacl_Hash_SHA3_shake128(pkh_mu, 16U, pk, 976U); memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu, 32U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 16U; uint8_t *seed_a = pk; @@ -101,8 +101,8 @@ uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 2176U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix); @@ -122,22 +122,22 @@ uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 1096U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t)); memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 1080U * sizeof (uint8_t)); memcpy(shake_input_ss + 1080U, k, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(coins, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 16U, uint8_t, void *); return 0U; } @@ -157,8 +157,8 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 64U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, 
uint16_t); + Lib_Memzero0_memzero(s_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[32U] = { 0U }; uint32_t pkh_mu_decode_len = 32U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -167,7 +167,7 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 2016U; memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 16U; uint8_t *s = sk; @@ -180,8 +180,8 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 2176U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix); @@ -199,12 +199,12 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 64U, 15U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 64U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -225,11 +225,11 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 1080U * sizeof (uint8_t)); memcpy(ss_init + 1080U, kp_s, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 16U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 16U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 16U, uint8_t, void *); return 0U; } diff --git a/src/msvc/Hacl_Frodo640.c b/src/msvc/Hacl_Frodo640.c index 5de5871f..e3d10300 100644 --- a/src/msvc/Hacl_Frodo640.c +++ b/src/msvc/Hacl_Frodo640.c @@ -45,7 +45,7 @@ uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 16U; uint8_t *z = coins + 
32U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake128(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 9632U; uint16_t s_matrix[5120U] = { 0U }; @@ -54,8 +54,8 @@ uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20480U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 20480U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r + 10240U, e_matrix); uint16_t b_matrix[5120U] = { 0U }; @@ -66,14 +66,14 @@ uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(640U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(640U, 8U, 15U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(640U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t, void *); uint32_t slen1 = 19872U; uint8_t *sk_p = sk; memcpy(sk_p, s, 16U * sizeof (uint8_t)); memcpy(sk_p + 16U, pk, 9616U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, sk + slen1); - Lib_Memzero0_memzero(coins, 48U, uint8_t); + Hacl_Hash_SHA3_shake128(sk + slen1, 16U, pk, 9616U); + Lib_Memzero0_memzero(coins, 48U, uint8_t, void *); return 0U; } @@ -83,9 +83,9 @@ uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(16U, coins); uint8_t seed_se_k[32U] = { 0U }; uint8_t pkh_mu[32U] = { 0U }; - Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, pkh_mu); + Hacl_Hash_SHA3_shake128(pkh_mu, 16U, pk, 9616U); memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu, 32U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 16U; uint8_t *seed_a = pk; @@ -97,8 +97,8 @@ uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 20608U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix); @@ -119,22 +119,22 @@ uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 5120U, 
uint16_t); - Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 9736U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t)); memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 9720U * sizeof (uint8_t)); memcpy(shake_input_ss + 9720U, k, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(coins, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 16U, uint8_t, void *); return 0U; } @@ -154,8 +154,8 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 640U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[32U] = { 0U }; uint32_t pkh_mu_decode_len = 32U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -164,7 +164,7 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 19872U; memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k); + Hacl_Hash_SHA3_shake128(seed_se_k, 32U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 16U; uint8_t *s = sk; @@ -177,8 +177,8 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[17U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t); + Hacl_Hash_SHA3_shake128(r, 20608U, shake_input_seed_se, 17U); + Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix); @@ -197,12 +197,12 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 640U, 15U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t); - 
Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 640U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -223,11 +223,11 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 9720U * sizeof (uint8_t)); memcpy(ss_init + 9720U, kp_s, 16U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 16U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 16U, uint8_t); + Hacl_Hash_SHA3_shake128(ss, 16U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 16U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 16U, uint8_t, void *); return 0U; } diff --git a/src/msvc/Hacl_Frodo976.c b/src/msvc/Hacl_Frodo976.c index 61454ceb..982192c1 100644 --- a/src/msvc/Hacl_Frodo976.c +++ b/src/msvc/Hacl_Frodo976.c @@ -45,7 +45,7 @@ uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t *seed_se = coins + 24U; uint8_t *z = coins + 48U; uint8_t *seed_a = pk; - Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a); + Hacl_Hash_SHA3_shake256(seed_a, 16U, z, 16U); uint8_t *b_bytes = pk + 16U; uint8_t *s_bytes = sk + 15656U; uint16_t s_matrix[7808U] = { 0U }; @@ -54,8 +54,8 @@ uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) uint8_t shake_input_seed_se[25U] = { 0U }; shake_input_seed_se[0U] = 0x5fU; memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31232U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 31232U, shake_input_seed_se, 25U); + Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r, s_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r + 15616U, e_matrix); uint16_t b_matrix[7808U] = { 0U }; @@ -66,14 +66,14 @@ uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) Hacl_Impl_Matrix_matrix_add(976U, 8U, b_matrix, e_matrix); Hacl_Impl_Frodo_Pack_frodo_pack(976U, 8U, 16U, b_matrix, b_bytes); Hacl_Impl_Matrix_matrix_to_lbytes(976U, 8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t, void *); uint32_t slen1 = 31272U; uint8_t *sk_p = sk; memcpy(sk_p, s, 24U * sizeof (uint8_t)); memcpy(sk_p + 24U, pk, 15632U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, sk + slen1); - Lib_Memzero0_memzero(coins, 64U, uint8_t); + Hacl_Hash_SHA3_shake256(sk + slen1, 24U, pk, 15632U); + Lib_Memzero0_memzero(coins, 64U, uint8_t, void *); return 0U; } @@ -83,9 +83,9 @@ uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) randombytes_(24U, coins); uint8_t seed_se_k[48U] = { 0U }; uint8_t pkh_mu[48U] = { 0U }; - 
Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, pkh_mu); + Hacl_Hash_SHA3_shake256(pkh_mu, 24U, pk, 15632U); memcpy(pkh_mu + 24U, coins, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(48U, pkh_mu, 48U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 48U, pkh_mu, 48U); uint8_t *seed_se = seed_se_k; uint8_t *k = seed_se_k + 24U; uint8_t *seed_a = pk; @@ -97,8 +97,8 @@ uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint8_t shake_input_seed_se[25U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 31360U, shake_input_seed_se, 25U); + Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix); @@ -119,22 +119,22 @@ uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, coins, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2); - Lib_Memzero0_memzero(v_matrix, 64U, uint16_t); - Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(v_matrix, 64U, uint16_t, void *); + Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint32_t ss_init_len = 15768U; KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t)); memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); memcpy(shake_input_ss, ct, 15744U * sizeof (uint8_t)); memcpy(shake_input_ss + 15744U, k, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 24U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t); - Lib_Memzero0_memzero(coins, 24U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 24U, shake_input_ss, ss_init_len); + Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t, void *); + Lib_Memzero0_memzero(coins, 24U, uint8_t, void *); return 0U; } @@ -154,8 +154,8 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) Hacl_Impl_Matrix_matrix_mul_s(8U, 976U, 8U, bp_matrix, s_matrix, m_matrix); Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix); Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 3U, 8U, m_matrix, mu_decode); - Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(m_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(m_matrix, 64U, uint16_t, void *); uint8_t seed_se_k[48U] = { 0U }; uint32_t pkh_mu_decode_len = 48U; KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); @@ -164,7 +164,7 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t *pkh = sk + 31272U; 
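Every Frodo call site in this patch follows the same mechanical rewrite: the one-shot SHAKE entry points (shake128 and shake256 alike) drop the _hacl suffix and move the output buffer and its length to the front. A side-by-side sketch of the two calling conventions (derive_seed and the 16/32-byte lengths are illustrative):

    #include <stdint.h>
    #include "Hacl_Hash_SHA3.h"

    /* Derive 32 bytes from a 16-byte seed with SHAKE-256. */
    static void derive_seed(uint8_t out[32U], uint8_t seed[16U])
    {
      /* Old convention, removed by this patch:
         Hacl_Hash_SHA3_shake256_hacl(16U, seed, 32U, out); */
      /* New convention: output and its length first, then input. */
      Hacl_Hash_SHA3_shake256(out, 32U, seed, 16U);
    }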
memcpy(pkh_mu_decode, pkh, 24U * sizeof (uint8_t)); memcpy(pkh_mu_decode + 24U, mu_decode, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 48U, seed_se_k); + Hacl_Hash_SHA3_shake256(seed_se_k, 48U, pkh_mu_decode, pkh_mu_decode_len); uint8_t *seed_se = seed_se_k; uint8_t *kp = seed_se_k + 24U; uint8_t *s = sk; @@ -177,8 +177,8 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint8_t shake_input_seed_se[25U] = { 0U }; shake_input_seed_se[0U] = 0x96U; memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r); - Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t); + Hacl_Hash_SHA3_shake256(r, 31360U, shake_input_seed_se, 25U); + Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t, void *); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix); Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix); @@ -197,12 +197,12 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) uint16_t mu_encode[64U] = { 0U }; Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, mu_decode, mu_encode); Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, 64U, uint16_t); + Lib_Memzero0_memzero(mu_encode, 64U, uint16_t, void *); Hacl_Impl_Matrix_mod_pow2(8U, 976U, 16U, bpp_matrix); Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t); - Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t); + Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t, void *); + Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t, void *); uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 976U, bp_matrix, bpp_matrix); uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix); uint16_t mask = (uint32_t)b1 & (uint32_t)b2; @@ -222,11 +222,11 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); memcpy(ss_init, ct, 15744U * sizeof (uint8_t)); memcpy(ss_init + 15744U, kp_s, 24U * sizeof (uint8_t)); - Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 24U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t); - Lib_Memzero0_memzero(kp_s, 24U, uint8_t); - Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t); - Lib_Memzero0_memzero(mu_decode, 24U, uint8_t); + Hacl_Hash_SHA3_shake256(ss, 24U, ss_init, ss_init_len); + Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t, void *); + Lib_Memzero0_memzero(kp_s, 24U, uint8_t, void *); + Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t, void *); + Lib_Memzero0_memzero(mu_decode, 24U, uint8_t, void *); return 0U; } diff --git a/src/msvc/Hacl_Hash_Blake2b.c b/src/msvc/Hacl_Hash_Blake2b.c index 2dceaf4b..68de8340 100644 --- a/src/msvc/Hacl_Hash_Blake2b.c +++ b/src/msvc/Hacl_Hash_Blake2b.c @@ -474,6 +474,7 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) { + uint64_t tmp[8U] = { 0U }; uint64_t *r0 = hash; uint64_t *r1 = hash + 4U; uint64_t *r2 = hash + 8U; @@ -494,16 +495,67 @@ void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; - uint64_t kk_shift_8 = (uint64_t)kk << 8U; - uint64_t iv0_ = iv0 
^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn)); + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2s_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; } static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) @@ -519,7 +571,7 @@ static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, ui { update_block(wv, hash, false, lb, b); } - Lib_Memzero0_memzero(b, 128U, uint8_t); + Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } void @@ -560,7 +612,7 @@ Hacl_Hash_Blake2b_update_last( FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, 128U, uint8_t); + Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } static void @@ -624,7 +676,7 @@ void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash) KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]);); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } /** @@ -965,7 +1017,7 @@ Hacl_Hash_Blake2b_hash_with_key( Hacl_Hash_Blake2b_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2b_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 16U, uint64_t); - Lib_Memzero0_memzero(b, 16U, uint64_t); + Lib_Memzero0_memzero(b1, 16U, uint64_t, void *); + Lib_Memzero0_memzero(b, 16U, uint64_t, void *); } diff --git a/src/msvc/Hacl_Hash_Blake2b_Simd256.c b/src/msvc/Hacl_Hash_Blake2b_Simd256.c index 1a5e8cf2..7aea4d42 100644 --- a/src/msvc/Hacl_Hash_Blake2b_Simd256.c +++ b/src/msvc/Hacl_Hash_Blake2b_Simd256.c @@ -26,6 +26,7 @@ #include "internal/Hacl_Hash_Blake2b_Simd256.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "lib_memzero0.h" static inline void @@ -214,6 +215,7 @@ update_block( void Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, 
uint32_t kk, uint32_t nn) { + uint64_t tmp[8U] = { 0U }; Lib_IntVector_Intrinsics_vec256 *r0 = hash; Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; @@ -228,10 +230,61 @@ Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t k uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - uint64_t kk_shift_8 = (uint64_t)kk << 8U; - uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn)); - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2s_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); } static void @@ -254,7 +307,7 @@ update_key( { update_block(wv, hash, false, lb, b); } - Lib_Memzero0_memzero(b, 128U, uint8_t); + Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } void @@ -295,7 +348,7 @@ Hacl_Hash_Blake2b_Simd256_update_last( FStar_UInt128_uint128 totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, 128U, uint8_t); + Lib_Memzero0_memzero(b, 128U, uint8_t, void *); } static inline void @@ -371,7 +424,7 @@ Hacl_Hash_Blake2b_Simd256_finish( Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } void @@ -822,7 +875,7 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( Hacl_Hash_Blake2b_Simd256_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2b_Simd256_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256); - Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256, void *); + Lib_Memzero0_memzero(b, 4U, 
Lib_IntVector_Intrinsics_vec256, void *); } diff --git a/src/msvc/Hacl_Hash_Blake2s.c b/src/msvc/Hacl_Hash_Blake2s.c index 652c3f33..37fabb67 100644 --- a/src/msvc/Hacl_Hash_Blake2s.c +++ b/src/msvc/Hacl_Hash_Blake2s.c @@ -26,6 +26,7 @@ #include "internal/Hacl_Hash_Blake2s.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "lib_memzero0.h" static inline void @@ -474,6 +475,7 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) { + uint32_t tmp[8U] = { 0U }; uint32_t *r0 = hash; uint32_t *r1 = hash + 4U; uint32_t *r2 = hash + 8U; @@ -494,16 +496,64 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; - uint32_t kk_shift_8 = kk << 8U; - uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn)); + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2s_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = nn ^ (kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; } static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll) @@ -519,7 +569,7 @@ static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, ui { update_block(wv, hash, false, lb, b); } - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } void @@ -556,7 +606,7 @@ Hacl_Hash_Blake2s_update_last( memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } static void @@ -614,7 +664,7 @@ void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash) KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]);); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, 32U, uint8_t); + Lib_Memzero0_memzero(b, 32U, uint8_t, void *); } /** @@ -925,7 +975,7 @@ Hacl_Hash_Blake2s_hash_with_key( 
Hacl_Hash_Blake2s_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2s_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 16U, uint32_t); - Lib_Memzero0_memzero(b, 16U, uint32_t); + Lib_Memzero0_memzero(b1, 16U, uint32_t, void *); + Lib_Memzero0_memzero(b, 16U, uint32_t, void *); } diff --git a/src/msvc/Hacl_Hash_Blake2s_Simd128.c b/src/msvc/Hacl_Hash_Blake2s_Simd128.c index 73f0cccb..ed86be43 100644 --- a/src/msvc/Hacl_Hash_Blake2s_Simd128.c +++ b/src/msvc/Hacl_Hash_Blake2s_Simd128.c @@ -26,6 +26,7 @@ #include "internal/Hacl_Hash_Blake2s_Simd128.h" #include "internal/Hacl_Impl_Blake2_Constants.h" +#include "internal/Hacl_Hash_Blake2b.h" #include "lib_memzero0.h" static inline void @@ -214,6 +215,7 @@ update_block( void Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn) { + uint32_t tmp[8U] = { 0U }; Lib_IntVector_Intrinsics_vec128 *r0 = hash; Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; @@ -228,10 +230,58 @@ Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t k uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - uint32_t kk_shift_8 = kk << 8U; - uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn)); - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2s_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = nn ^ (kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); } static void @@ -254,7 +304,7 @@ update_key( { update_block(wv, hash, false, lb, b); } - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } void @@ -291,7 +341,7 @@ Hacl_Hash_Blake2s_Simd128_update_last( memcpy(b, last, rem * sizeof (uint8_t)); uint64_t totlen = prev + (uint64_t)len; update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, 64U, uint8_t); + Lib_Memzero0_memzero(b, 64U, 
uint8_t, void *); } static inline void @@ -367,7 +417,7 @@ Hacl_Hash_Blake2s_Simd128_finish( Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]); uint8_t *final = b; memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, 32U, uint8_t); + Lib_Memzero0_memzero(b, 32U, uint8_t, void *); } void @@ -788,7 +838,7 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( Hacl_Hash_Blake2s_Simd128_init(b, key_len, output_len); update(b1, b, key_len, key, input_len, input); Hacl_Hash_Blake2s_Simd128_finish(output_len, output, b); - Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128); - Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); } diff --git a/src/msvc/Hacl_Hash_SHA3.c b/src/msvc/Hacl_Hash_SHA3.c index 4f502866..b8551af3 100644 --- a/src/msvc/Hacl_Hash_SHA3.c +++ b/src/msvc/Hacl_Hash_SHA3.c @@ -25,6 +25,34 @@ #include "internal/Hacl_Hash_SHA3.h" +const +uint32_t +Hacl_Hash_SHA3_keccak_rotc[24U] = + { + 1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U, + 39U, 61U, 20U, 44U + }; + +const +uint32_t +Hacl_Hash_SHA3_keccak_piln[24U] = + { + 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U, + 22U, 9U, 6U, 1U + }; + +const +uint64_t +Hacl_Hash_SHA3_keccak_rndc[24U] = + { + 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL + }; + static uint32_t block_len(Spec_Hash_Definitions_hash_alg a) { switch (a) @@ -97,10 +125,128 @@ Hacl_Hash_SHA3_update_multi_sha3( uint32_t n_blocks ) { - for (uint32_t i = 0U; i < n_blocks; i++) + uint32_t l = block_len(a) * n_blocks; + for (uint32_t i0 = 0U; i0 < l / block_len(a); i0++) { - uint8_t *block = blocks + i * block_len(a); - Hacl_Hash_SHA3_absorb_inner(block_len(a), block, s); + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = blocks; + uint8_t *bl0 = b_; + uint8_t *uu____0 = b0 + i0 * block_len(a); + memcpy(bl0, uu____0, block_len(a) * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = 
load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____1 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____1 << 1U | uu____1 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____2 = current; + s[_Y] = uu____2 << r | uu____2 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } } } @@ -124,37 +270,608 @@ Hacl_Hash_SHA3_update_last_sha3( uint32_t len = block_len(a); if (input_len == len) { - Hacl_Hash_SHA3_absorb_inner(len, input, s); - uint8_t lastBlock_[200U] = { 0U }; - uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t)); - lastBlock[0U] = suffix; - Hacl_Hash_SHA3_loadState(len, lastBlock, s); - if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U) - { - Hacl_Hash_SHA3_state_permute(s); - } - uint8_t nextBlock_[200U] = { 0U }; - uint8_t *nextBlock = nextBlock_; - nextBlock[len - 1U] = 0x80U; - Hacl_Hash_SHA3_loadState(len, nextBlock, s); - Hacl_Hash_SHA3_state_permute(s); + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint8_t *b00 = input; + uint8_t *bl00 = b_; + memcpy(bl00, b00 + 0U * len, len * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b3 = b_; + uint64_t u0 = load64_le(b3); + ws[0U] = u0; + uint64_t u1 = load64_le(b3 + 8U); + ws[1U] = u1; + uint64_t u2 = load64_le(b3 + 16U); + ws[2U] = u2; + uint64_t u3 = load64_le(b3 + 24U); + ws[3U] = u3; + uint64_t u4 = load64_le(b3 + 32U); + ws[4U] = u4; + uint64_t u5 = load64_le(b3 + 40U); + ws[5U] = u5; + uint64_t u6 = load64_le(b3 + 48U); + ws[6U] = u6; + uint64_t u7 = load64_le(b3 + 56U); + ws[7U] = u7; + uint64_t u8 = load64_le(b3 + 64U); + ws[8U] = u8; + uint64_t u9 = load64_le(b3 + 72U); + ws[9U] = u9; + uint64_t 
u10 = load64_le(b3 + 80U); + ws[10U] = u10; + uint64_t u11 = load64_le(b3 + 88U); + ws[11U] = u11; + uint64_t u12 = load64_le(b3 + 96U); + ws[12U] = u12; + uint64_t u13 = load64_le(b3 + 104U); + ws[13U] = u13; + uint64_t u14 = load64_le(b3 + 112U); + ws[14U] = u14; + uint64_t u15 = load64_le(b3 + 120U); + ws[15U] = u15; + uint64_t u16 = load64_le(b3 + 128U); + ws[16U] = u16; + uint64_t u17 = load64_le(b3 + 136U); + ws[17U] = u17; + uint64_t u18 = load64_le(b3 + 144U); + ws[18U] = u18; + uint64_t u19 = load64_le(b3 + 152U); + ws[19U] = u19; + uint64_t u20 = load64_le(b3 + 160U); + ws[20U] = u20; + uint64_t u21 = load64_le(b3 + 168U); + ws[21U] = u21; + uint64_t u22 = load64_le(b3 + 176U); + ws[22U] = u22; + uint64_t u23 = load64_le(b3 + 184U); + ws[23U] = u23; + uint64_t u24 = load64_le(b3 + 192U); + ws[24U] = u24; + uint64_t u25 = load64_le(b3 + 200U); + ws[25U] = u25; + uint64_t u26 = load64_le(b3 + 208U); + ws[26U] = u26; + uint64_t u27 = load64_le(b3 + 216U); + ws[27U] = u27; + uint64_t u28 = load64_le(b3 + 224U); + ws[28U] = u28; + uint64_t u29 = load64_le(b3 + 232U); + ws[29U] = u29; + uint64_t u30 = load64_le(b3 + 240U); + ws[30U] = u30; + uint64_t u31 = load64_le(b3 + 248U); + ws[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + uint8_t b4[256U] = { 0U }; + uint8_t *b_0 = b4; + uint32_t rem = 0U % len; + uint8_t *b01 = input + input_len; + uint8_t *bl0 = b_0; + memcpy(bl0, b01 + 0U - rem, rem * sizeof (uint8_t)); + uint8_t *b02 = b_0; + b02[0U % len] = suffix; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_0; + uint64_t u32 = load64_le(b); + ws0[0U] = u32; + uint64_t u33 = load64_le(b + 8U); + ws0[1U] = u33; + uint64_t u34 = load64_le(b + 16U); + ws0[2U] = u34; + uint64_t u35 = load64_le(b + 24U); + ws0[3U] = u35; + uint64_t u36 = load64_le(b + 32U); + ws0[4U] = u36; + uint64_t u37 = load64_le(b + 40U); + ws0[5U] = u37; + uint64_t u38 = load64_le(b + 48U); + ws0[6U] = u38; + uint64_t u39 = load64_le(b + 56U); + ws0[7U] = u39; + uint64_t u40 = load64_le(b + 64U); + ws0[8U] = u40; + uint64_t u41 = load64_le(b + 72U); + ws0[9U] = u41; + uint64_t u42 = load64_le(b + 80U); + ws0[10U] = u42; + uint64_t u43 = load64_le(b + 88U); + ws0[11U] = u43; + uint64_t u44 = load64_le(b + 96U); + 
ws0[12U] = u44; + uint64_t u45 = load64_le(b + 104U); + ws0[13U] = u45; + uint64_t u46 = load64_le(b + 112U); + ws0[14U] = u46; + uint64_t u47 = load64_le(b + 120U); + ws0[15U] = u47; + uint64_t u48 = load64_le(b + 128U); + ws0[16U] = u48; + uint64_t u49 = load64_le(b + 136U); + ws0[17U] = u49; + uint64_t u50 = load64_le(b + 144U); + ws0[18U] = u50; + uint64_t u51 = load64_le(b + 152U); + ws0[19U] = u51; + uint64_t u52 = load64_le(b + 160U); + ws0[20U] = u52; + uint64_t u53 = load64_le(b + 168U); + ws0[21U] = u53; + uint64_t u54 = load64_le(b + 176U); + ws0[22U] = u54; + uint64_t u55 = load64_le(b + 184U); + ws0[23U] = u55; + uint64_t u56 = load64_le(b + 192U); + ws0[24U] = u56; + uint64_t u57 = load64_le(b + 200U); + ws0[25U] = u57; + uint64_t u58 = load64_le(b + 208U); + ws0[26U] = u58; + uint64_t u59 = load64_le(b + 216U); + ws0[27U] = u59; + uint64_t u60 = load64_le(b + 224U); + ws0[28U] = u60; + uint64_t u61 = load64_le(b + 232U); + ws0[29U] = u61; + uint64_t u62 = load64_le(b + 240U); + ws0[30U] = u62; + uint64_t u63 = load64_le(b + 248U); + ws0[31U] = u63; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U % len == len - 1U) + { + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b5[256U] = { 0U }; + uint8_t *b6 = b5; + uint8_t *b0 = b6; + b0[len - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b6; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u64 = load64_le(b1 + 8U); + ws1[1U] = u64; + uint64_t u65 = load64_le(b1 + 16U); + ws1[2U] = u65; + uint64_t u66 = load64_le(b1 + 24U); + ws1[3U] = u66; + uint64_t u67 = load64_le(b1 + 32U); + ws1[4U] = u67; + uint64_t u68 = load64_le(b1 + 40U); + ws1[5U] = u68; + uint64_t u69 = load64_le(b1 + 48U); + ws1[6U] = u69; + uint64_t u70 = load64_le(b1 + 56U); + ws1[7U] = u70; + uint64_t u71 = load64_le(b1 + 64U); + ws1[8U] = u71; + uint64_t u72 = load64_le(b1 + 72U); + ws1[9U] = u72; + uint64_t u73 = load64_le(b1 + 80U); + ws1[10U] = u73; + uint64_t u74 = load64_le(b1 + 88U); + ws1[11U] = u74; + uint64_t u75 = load64_le(b1 + 96U); + ws1[12U] = u75; + uint64_t u76 = load64_le(b1 + 104U); + ws1[13U] = u76; + uint64_t u77 = load64_le(b1 + 112U); + ws1[14U] = u77; + uint64_t u78 = load64_le(b1 + 120U); + ws1[15U] = u78; + uint64_t 
u79 = load64_le(b1 + 128U); + ws1[16U] = u79; + uint64_t u80 = load64_le(b1 + 136U); + ws1[17U] = u80; + uint64_t u81 = load64_le(b1 + 144U); + ws1[18U] = u81; + uint64_t u82 = load64_le(b1 + 152U); + ws1[19U] = u82; + uint64_t u83 = load64_le(b1 + 160U); + ws1[20U] = u83; + uint64_t u84 = load64_le(b1 + 168U); + ws1[21U] = u84; + uint64_t u85 = load64_le(b1 + 176U); + ws1[22U] = u85; + uint64_t u86 = load64_le(b1 + 184U); + ws1[23U] = u86; + uint64_t u87 = load64_le(b1 + 192U); + ws1[24U] = u87; + uint64_t u88 = load64_le(b1 + 200U); + ws1[25U] = u88; + uint64_t u89 = load64_le(b1 + 208U); + ws1[26U] = u89; + uint64_t u90 = load64_le(b1 + 216U); + ws1[27U] = u90; + uint64_t u91 = load64_le(b1 + 224U); + ws1[28U] = u91; + uint64_t u92 = load64_le(b1 + 232U); + ws1[29U] = u92; + uint64_t u93 = load64_le(b1 + 240U); + ws1[30U] = u93; + uint64_t u94 = load64_le(b1 + 248U); + ws1[31U] = u94; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } return; } - uint8_t lastBlock_[200U] = { 0U }; - uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, input, input_len * sizeof (uint8_t)); - lastBlock[input_len] = suffix; - Hacl_Hash_SHA3_loadState(len, lastBlock, s); - if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U) + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = input_len % len; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + input_len - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[input_len % len] = suffix; + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + 
ws[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len % len == len - 1U) + { + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____6 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____6 << 1U | uu____6 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____7 = current; + s[_Y] = uu____7 << r | uu____7 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[len - 1U] = 0x80U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws0[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws0[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws0[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws0[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws0[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws0[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws0[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws0[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws0[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws0[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws0[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws0[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws0[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws0[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws0[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws0[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws0[16U] = u47; + uint64_t u48 = 
load64_le(b1 + 136U); + ws0[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws0[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws0[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws0[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws0[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws0[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws0[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws0[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws0[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws0[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws0[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws0[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws0[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws0[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws0[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) { - Hacl_Hash_SHA3_state_permute(s); + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____8 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____8 << 1U | uu____8 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____9 = current; + s[_Y] = uu____9 << r | uu____9 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; } - uint8_t nextBlock_[200U] = { 0U }; - uint8_t *nextBlock = nextBlock_; - nextBlock[len - 1U] = 0x80U; - Hacl_Hash_SHA3_loadState(len, nextBlock, s); - Hacl_Hash_SHA3_state_permute(s); } typedef struct hash_buf2_s @@ -463,10 +1180,139 @@ digest_( uint64_t *s = tmp_block_state.snd; if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256) { - Hacl_Hash_SHA3_squeeze0(s, block_len(a11), l, output); + for (uint32_t i0 = 0U; i0 < l / block_len(a11); i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b0 = output; + uint8_t *uu____0 = hbuf; + memcpy(b0 + i0 * block_len(a11), uu____0, block_len(a11) * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____1 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____1 << 1U | uu____1 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t 
_Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r1 = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____2 = current; + s[_Y] = uu____2 << r1 | uu____2 >> (64U - r1); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = l % block_len(a11); + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + l - remOut, hbuf, remOut * sizeof (uint8_t)); return; } - Hacl_Hash_SHA3_squeeze0(s, block_len(a11), hash_len(a11), output); + for (uint32_t i0 = 0U; i0 < hash_len(a11) / block_len(a11); i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b0 = output; + uint8_t *uu____3 = hbuf; + memcpy(b0 + i0 * block_len(a11), uu____3, block_len(a11) * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r1 = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r1 | uu____5 >> (64U - r1); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = hash_len(a11) % block_len(a11); + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *uu____6 = hbuf; + memcpy(output + hash_len(a11) - remOut, uu____6, remOut * sizeof (uint8_t)); } Hacl_Streaming_Types_error_code @@ -516,77 +1362,290 @@ bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s) } void -Hacl_Hash_SHA3_shake128_hacl( - uint32_t inputByteLen, - uint8_t *input, +Hacl_Hash_SHA3_shake128( + uint8_t *output, uint32_t outputByteLen, - uint8_t *output -) -{ - Hacl_Hash_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, 
output); -} - -void -Hacl_Hash_SHA3_shake256_hacl( - uint32_t inputByteLen, uint8_t *input, - uint32_t outputByteLen, - uint8_t *output + uint32_t inputByteLen ) { - Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output); -} - -void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(1152U, 448U, input_len, input, 0x06U, 28U, output); -} - -void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(1088U, 512U, input_len, input, 0x06U, 32U, output); -} - -void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(832U, 768U, input_len, input, 0x06U, 48U, output); -} - -void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t input_len) -{ - Hacl_Hash_SHA3_keccak(576U, 1024U, input_len, input, 0x06U, 64U, output); -} - -static const -uint32_t -keccak_rotc[24U] = + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 168U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) { - 1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U, - 39U, 61U, 20U, 44U - }; - -static const -uint32_t -keccak_piln[24U] = + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = 
_C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) { - 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U, - 22U, 9U, 6U, 1U - }; - -static const -uint64_t -keccak_rndc[24U] = + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes1 - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = 
load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) { - 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, - 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, - 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, - 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, - 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, - 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL - }; - -void Hacl_Hash_SHA3_state_permute(uint64_t *s) -{ + s[i] = s[i] ^ ws1[i]; + } for (uint32_t i0 = 0U; i0 < 24U; i0++) { uint64_t _C[5U] = { 0U }; @@ -599,18 +1658,18 @@ void Hacl_Hash_SHA3_state_permute(uint64_t *s) 0U, 5U, 1U, - uint64_t uu____0 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; for (uint32_t i = 0U; i < 24U; i++) { - uint32_t _Y = keccak_piln[i]; - uint32_t r = keccak_rotc[i]; + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____1 = current; - s[_Y] = uu____1 << r | uu____1 >> (64U - r); + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -627,108 +1686,2468 @@ void Hacl_Hash_SHA3_state_permute(uint64_t *s) s[2U + 5U * i] = v2; s[3U + 5U * i] = v3; s[4U + 5U * i] = v4;); - uint64_t c = keccak_rndc[i0]; + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; s[0U] = s[0U] ^ c; } -} 
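/*
 * Editorial sketch — not part of the generated patch. Every hunk in this
 * file repeats the same 24-round Keccak-f[1600] permutation that this
 * commit inlines in place of the removed Hacl_Hash_SHA3_state_permute
 * (shown deleted just above), presumably so each caller gets a fully
 * specialized body. The loop is the five step mappings of FIPS 202:
 * theta (column parities _C/_D), rho and pi fused into one walk over
 * keccak_piln/keccak_rotc, chi, and iota (XOR of keccak_rndc[round] into
 * lane 0). A minimal standalone reference under those assumptions follows;
 * the helper names (keccak_f1600_sketch, rotl64) are hypothetical, while
 * the three tables carry exactly the values of the static keccak_rotc,
 * keccak_piln, and keccak_rndc definitions removed in this hunk.
 */
#include <stdint.h>

static const uint32_t rotc[24] = {
  1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8,
  25, 43, 62, 18, 39, 61, 20, 44
};
static const uint32_t piln[24] = {
  10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13,
  12, 2, 20, 14, 22, 9, 6, 1
};
static const uint64_t rndc[24] = {
  0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
  0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
  0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
  0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
  0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
  0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
  0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
  0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};

static inline uint64_t rotl64(uint64_t x, uint32_t n)
{
  return x << n | x >> (64U - n); /* n is in [1,63] for every rotc entry */
}

/* The state is 25 lanes of 64 bits, indexed as s[x + 5*y] exactly as in
   the generated code above. */
static void keccak_f1600_sketch(uint64_t s[25U])
{
  for (uint32_t round = 0U; round < 24U; round++)
  {
    /* theta: XOR each lane with a parity of two neighbouring columns */
    uint64_t c[5U];
    for (uint32_t x = 0U; x < 5U; x++)
      c[x] = s[x] ^ s[x + 5U] ^ s[x + 10U] ^ s[x + 15U] ^ s[x + 20U];
    for (uint32_t x = 0U; x < 5U; x++)
    {
      uint64_t d = c[(x + 4U) % 5U] ^ rotl64(c[(x + 1U) % 5U], 1U);
      for (uint32_t y = 0U; y < 25U; y += 5U)
        s[x + y] ^= d;
    }
    /* rho + pi: rotate lanes while permuting them along the pi cycle,
       matching the "current/temp" walk in the inlined loops */
    uint64_t current = s[1U];
    for (uint32_t i = 0U; i < 24U; i++)
    {
      uint64_t temp = s[piln[i]];
      s[piln[i]] = rotl64(current, rotc[i]);
      current = temp;
    }
    /* chi: non-linear mixing within each row of five lanes */
    for (uint32_t y = 0U; y < 25U; y += 5U)
    {
      uint64_t r0 = s[y], r1 = s[y + 1U], r2 = s[y + 2U];
      uint64_t r3 = s[y + 3U], r4 = s[y + 4U];
      s[y]      = r0 ^ (~r1 & r2);
      s[y + 1U] = r1 ^ (~r2 & r3);
      s[y + 2U] = r2 ^ (~r3 & r4);
      s[y + 3U] = r3 ^ (~r4 & r0);
      s[y + 4U] = r4 ^ (~r0 & r1);
    }
    /* iota: inject the round constant into lane (0,0) */
    s[0U] ^= rndc[round];
  }
}
/*
 * The specialized shake128/shake256/sha3_* bodies below are this same
 * permutation wrapped in the sponge pattern: absorb rate-sized blocks,
 * pad the last block with the domain suffix (0x1F or 0x06) plus a final
 * 0x80 byte at position rate-1, then squeeze rate-sized output blocks,
 * permuting between each.
 */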
- -void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s) -{ - uint8_t block[200U] = { 0U }; - memcpy(block, input, rateInBytes * sizeof (uint8_t)); - for (uint32_t i = 0U; i < 25U; i++) + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) { - uint64_t u = load64_le(block + i * 8U); - uint64_t x = u; - s[i] = s[i] ^ x; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } } -} - -static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res) -{ - uint8_t block[200U] = { 0U }; - for (uint32_t i = 0U; i < 25U; i++) + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) { - uint64_t sj = s[i]; - store64_le(block + i * 8U, sj); + store64_le(hbuf + i * 8U, ws[i]); } - memcpy(res, block, rateInBytes * sizeof (uint8_t)); -} - -void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s) -{ - Hacl_Hash_SHA3_loadState(rateInBytes, block, s); - Hacl_Hash_SHA3_state_permute(s); -} - -static void -absorb( - uint64_t *s, - uint32_t rateInBytes, - uint32_t inputByteLen, - uint8_t *input, - uint8_t delimitedSuffix -) -{ - uint32_t n_blocks = inputByteLen / rateInBytes; - uint32_t rem = inputByteLen % rateInBytes; - for (uint32_t i = 0U; i < n_blocks; i++) - { - uint8_t *block = input + i * rateInBytes; - Hacl_Hash_SHA3_absorb_inner(rateInBytes, block, s); - } - uint8_t *last = input + n_blocks * rateInBytes; - uint8_t lastBlock_[200U] = { 0U }; - uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, last, rem * sizeof (uint8_t)); - lastBlock[rem] = delimitedSuffix; - Hacl_Hash_SHA3_loadState(rateInBytes, lastBlock, s); - if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U) - { - Hacl_Hash_SHA3_state_permute(s); - } - uint8_t nextBlock_[200U] = { 0U }; - uint8_t *nextBlock = nextBlock_; - nextBlock[rateInBytes - 1U] = 0x80U; - Hacl_Hash_SHA3_loadState(rateInBytes, nextBlock, s); - 
Hacl_Hash_SHA3_state_permute(s); + memcpy(rb + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); } void -Hacl_Hash_SHA3_squeeze0( - uint64_t *s, - uint32_t rateInBytes, +Hacl_Hash_SHA3_shake256( + uint8_t *output, uint32_t outputByteLen, - uint8_t *output + uint8_t *input, + uint32_t inputByteLen ) { - uint32_t outBlocks = outputByteLen / rateInBytes; - uint32_t remOut = outputByteLen % rateInBytes; - uint8_t *last = output + outputByteLen - remOut; - uint8_t *blocks = output; - for (uint32_t i = 0U; i < outBlocks; i++) + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * 
i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes1 - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t 
u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * 
i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) { - storeState(rateInBytes, s, blocks + i * rateInBytes); - Hacl_Hash_SHA3_state_permute(s); + store64_le(hbuf + i * 8U, ws[i]); } - storeState(remOut, s, last); + memcpy(rb + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); } -void -Hacl_Hash_SHA3_keccak( - uint32_t rate, - uint32_t capacity, - uint32_t inputByteLen, - uint8_t *input, - uint8_t delimitedSuffix, - uint32_t outputByteLen, - uint8_t *output -) +void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen) { - KRML_MAYBE_UNUSED_VAR(capacity); - uint32_t rateInBytes = rate / 8U; + uint8_t *ib = input; + uint8_t *rb = output; uint64_t s[25U] = { 0U }; - absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix); - Hacl_Hash_SHA3_squeeze0(s, rateInBytes, outputByteLen, output); + uint32_t rateInBytes1 = 144U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; 
+ KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes1 - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + 
uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, 
+ 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 28U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = 
u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for 
(uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes1 - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 
0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 32U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 104U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + 
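The one-shot digests in this hunk all share the same sponge shape and differ only in rate and output length: SHA3-256 absorbs 136-byte blocks, SHA3-384 104-byte blocks, and SHA3-512 72-byte blocks, each padding the final partial block with the 0x06 domain byte plus a closing 0x80 bit, then squeezing 32, 48, or 64 bytes. A minimal caller sketch against the Hacl_Hash_SHA3_sha3_256 signature above; the wrapper name and the const cast are illustrative, not part of the patch:

  #include <stdint.h>
  #include "Hacl_Hash_SHA3.h"

  /* One-shot SHA3-256 of an in-memory message; the digest is always 32 bytes. */
  static void hash_message(uint8_t digest[32], const uint8_t *msg, uint32_t len)
  {
    /* The generated signature takes a non-const input pointer even though the
       input is only read, hence the cast. */
    Hacl_Hash_SHA3_sha3_256(digest, (uint8_t *)msg, len);
  }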
uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = 
load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes1 - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 
5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 48U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 72U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 
= load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + 
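/* Note on the 24-iteration loops repeated throughout this file: each one is a
   full Keccak-f[1600] permutation. The _C/_D computation is the theta step
   (column parities, then D[x] = C[x-1] ^ rotl1(C[x+1])), the loop over
   Hacl_Hash_SHA3_keccak_piln and Hacl_Hash_SHA3_keccak_rotc applies the rho
   rotations along the pi lane permutation, the v0..v4 assignments are chi
   (a ^ (~b & c)), and the final xor with Hacl_Hash_SHA3_keccak_rndc is iota. */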
ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes1 - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & 
s[4U + 5U * i]);
+ uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+ uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+ s[0U + 5U * i] = v0;
+ s[1U + 5U * i] = v1;
+ s[2U + 5U * i] = v2;
+ s[3U + 5U * i] = v3;
+ s[4U + 5U * i] = v4;);
+ uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0];
+ s[0U] = s[0U] ^ c;
+ }
+ for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++)
+ {
+ uint8_t hbuf[256U] = { 0U };
+ uint64_t ws[32U] = { 0U };
+ memcpy(ws, s, 25U * sizeof (uint64_t));
+ for (uint32_t i = 0U; i < 32U; i++)
+ {
+ store64_le(hbuf + i * 8U, ws[i]);
+ }
+ uint8_t *b02 = rb;
+ memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t));
+ for (uint32_t i1 = 0U; i1 < 24U; i1++)
+ {
+ uint64_t _C[5U] = { 0U };
+ KRML_MAYBE_FOR5(i,
+ 0U,
+ 5U,
+ 1U,
+ _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U]))););
+ KRML_MAYBE_FOR5(i2,
+ 0U,
+ 5U,
+ 1U,
+ uint64_t uu____4 = _C[(i2 + 1U) % 5U];
+ uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U);
+ KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;););
+ uint64_t x = s[1U];
+ uint64_t current = x;
+ for (uint32_t i = 0U; i < 24U; i++)
+ {
+ uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
+ uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
+ uint64_t temp = s[_Y];
+ uint64_t uu____5 = current;
+ s[_Y] = uu____5 << r | uu____5 >> (64U - r);
+ current = temp;
+ }
+ KRML_MAYBE_FOR5(i,
+ 0U,
+ 5U,
+ 1U,
+ uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]);
+ uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]);
+ uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]);
+ uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+ uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+ s[0U + 5U * i] = v0;
+ s[1U + 5U * i] = v1;
+ s[2U + 5U * i] = v2;
+ s[3U + 5U * i] = v3;
+ s[4U + 5U * i] = v4;);
+ uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
+ s[0U] = s[0U] ^ c;
+ }
+ }
+ uint32_t remOut = 64U % rateInBytes1;
+ uint8_t hbuf[256U] = { 0U };
+ uint64_t ws[32U] = { 0U };
+ memcpy(ws, s, 25U * sizeof (uint64_t));
+ for (uint32_t i = 0U; i < 32U; i++)
+ {
+ store64_le(hbuf + i * 8U, ws[i]);
+ }
+ memcpy(rb + 64U - remOut, hbuf, remOut * sizeof (uint8_t));
+}
+
+/**
+Allocate a state buffer of 200 bytes, i.e., uint64_t[25]
+*/
+uint64_t *Hacl_Hash_SHA3_state_malloc(void)
+{
+ uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+ return buf;
+}
+
+/**
+Free a state buffer
+*/
+void Hacl_Hash_SHA3_state_free(uint64_t *s)
+{
+ KRML_HOST_FREE(s);
+}
+
+/**
+Absorb a number of input blocks and write the output state
+
+ This function is intended to receive a hash state and input buffer.
+ It processes an input whose length is a multiple of 168 bytes (the SHAKE128 block size);
+ any additional bytes of a final partial block are ignored.
+ + The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t inputByteLen) +{ + for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * 168U, 168U * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + 
state[0U + 5U * i] = v0;
+ state[1U + 5U * i] = v1;
+ state[2U + 5U * i] = v2;
+ state[3U + 5U * i] = v3;
+ state[4U + 5U * i] = v4;);
+ uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
+ state[0U] = state[0U] ^ c;
+ }
+ }
+}
+
+/**
+Absorb a final partial block of input and write the output state
+
+ This function is intended to receive a hash state and input buffer.
+ It processes the sequence of bytes at the end of the input buffer that is shorter
+ than 168 bytes (the SHAKE128 block size);
+ any bytes of full blocks at the start of the input buffer are ignored.
+
+ The argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25]
+ The argument `input` (IN) points to `inputByteLen` bytes of valid memory,
+ i.e., uint8_t[inputByteLen]
+
+ Note: The full size of the input buffer must be passed as `inputByteLen`, including
+ the number of full-block bytes at the start of the input buffer that are ignored
+*/
+void
+Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t inputByteLen)
+{
+ uint8_t b2[256U] = { 0U };
+ uint8_t *b_ = b2;
+ uint32_t rem = inputByteLen % 168U;
+ uint8_t *b00 = input;
+ uint8_t *bl0 = b_;
+ memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t));
+ uint8_t *b01 = b_;
+ b01[inputByteLen % 168U] = 0x1FU;
+ uint64_t ws[32U] = { 0U };
+ uint8_t *b = b_;
+ uint64_t u0 = load64_le(b);
+ ws[0U] = u0;
+ uint64_t u1 = load64_le(b + 8U);
+ ws[1U] = u1;
+ uint64_t u2 = load64_le(b + 16U);
+ ws[2U] = u2;
+ uint64_t u3 = load64_le(b + 24U);
+ ws[3U] = u3;
+ uint64_t u4 = load64_le(b + 32U);
+ ws[4U] = u4;
+ uint64_t u5 = load64_le(b + 40U);
+ ws[5U] = u5;
+ uint64_t u6 = load64_le(b + 48U);
+ ws[6U] = u6;
+ uint64_t u7 = load64_le(b + 56U);
+ ws[7U] = u7;
+ uint64_t u8 = load64_le(b + 64U);
+ ws[8U] = u8;
+ uint64_t u9 = load64_le(b + 72U);
+ ws[9U] = u9;
+ uint64_t u10 = load64_le(b + 80U);
+ ws[10U] = u10;
+ uint64_t u11 = load64_le(b + 88U);
+ ws[11U] = u11;
+ uint64_t u12 = load64_le(b + 96U);
+ ws[12U] = u12;
+ uint64_t u13 = load64_le(b + 104U);
+ ws[13U] = u13;
+ uint64_t u14 = load64_le(b + 112U);
+ ws[14U] = u14;
+ uint64_t u15 = load64_le(b + 120U);
+ ws[15U] = u15;
+ uint64_t u16 = load64_le(b + 128U);
+ ws[16U] = u16;
+ uint64_t u17 = load64_le(b + 136U);
+ ws[17U] = u17;
+ uint64_t u18 = load64_le(b + 144U);
+ ws[18U] = u18;
+ uint64_t u19 = load64_le(b + 152U);
+ ws[19U] = u19;
+ uint64_t u20 = load64_le(b + 160U);
+ ws[20U] = u20;
+ uint64_t u21 = load64_le(b + 168U);
+ ws[21U] = u21;
+ uint64_t u22 = load64_le(b + 176U);
+ ws[22U] = u22;
+ uint64_t u23 = load64_le(b + 184U);
+ ws[23U] = u23;
+ uint64_t u24 = load64_le(b + 192U);
+ ws[24U] = u24;
+ uint64_t u25 = load64_le(b + 200U);
+ ws[25U] = u25;
+ uint64_t u26 = load64_le(b + 208U);
+ ws[26U] = u26;
+ uint64_t u27 = load64_le(b + 216U);
+ ws[27U] = u27;
+ uint64_t u28 = load64_le(b + 224U);
+ ws[28U] = u28;
+ uint64_t u29 = load64_le(b + 232U);
+ ws[29U] = u29;
+ uint64_t u30 = load64_le(b + 240U);
+ ws[30U] = u30;
+ uint64_t u31 = load64_le(b + 248U);
+ ws[31U] = u31;
+ for (uint32_t i = 0U; i < 25U; i++)
+ {
+ state[i] = state[i] ^ ws[i];
+ }
+ uint8_t b3[256U] = { 0U };
+ uint8_t *b4 = b3;
+ uint8_t *b0 = b4;
+ b0[167U] = 0x80U;
+ uint64_t ws0[32U] = { 0U };
+ uint8_t *b1 = b4;
+ uint64_t u = load64_le(b1);
+ ws0[0U] = u;
+ uint64_t u32 = load64_le(b1 + 8U);
+ ws0[1U] = u32;
+ uint64_t u33 = load64_le(b1 + 16U);
+ ws0[2U] = u33;
+ uint64_t u34 = load64_le(b1 + 24U);
+ ws0[3U] = u34;
+ uint64_t u35 = load64_le(b1 + 32U);
+ ws0[4U] = u35;
+ uint64_t u36 = load64_le(b1 + 40U);
+ ws0[5U] = u36;
+ uint64_t u37 = load64_le(b1 + 48U);
+ ws0[6U] = u37;
+ uint64_t u38 = load64_le(b1 + 56U);
+ ws0[7U] = u38;
+ uint64_t u39 = load64_le(b1 + 64U);
+ ws0[8U] = u39;
+ uint64_t u40 = load64_le(b1 + 72U);
+ ws0[9U] = u40;
+ uint64_t u41 = load64_le(b1 + 80U);
+ ws0[10U] = u41;
+ uint64_t u42 = load64_le(b1 + 88U);
+ ws0[11U] = u42;
+ uint64_t u43 = load64_le(b1 + 96U);
+ ws0[12U] = u43;
+ uint64_t u44 = load64_le(b1 + 104U);
+ ws0[13U] = u44;
+ uint64_t u45 = load64_le(b1 + 112U);
+ ws0[14U] = u45;
+ uint64_t u46 = load64_le(b1 + 120U);
+ ws0[15U] = u46;
+ uint64_t u47 = load64_le(b1 + 128U);
+ ws0[16U] = u47;
+ uint64_t u48 = load64_le(b1 + 136U);
+ ws0[17U] = u48;
+ uint64_t u49 = load64_le(b1 + 144U);
+ ws0[18U] = u49;
+ uint64_t u50 = load64_le(b1 + 152U);
+ ws0[19U] = u50;
+ uint64_t u51 = load64_le(b1 + 160U);
+ ws0[20U] = u51;
+ uint64_t u52 = load64_le(b1 + 168U);
+ ws0[21U] = u52;
+ uint64_t u53 = load64_le(b1 + 176U);
+ ws0[22U] = u53;
+ uint64_t u54 = load64_le(b1 + 184U);
+ ws0[23U] = u54;
+ uint64_t u55 = load64_le(b1 + 192U);
+ ws0[24U] = u55;
+ uint64_t u56 = load64_le(b1 + 200U);
+ ws0[25U] = u56;
+ uint64_t u57 = load64_le(b1 + 208U);
+ ws0[26U] = u57;
+ uint64_t u58 = load64_le(b1 + 216U);
+ ws0[27U] = u58;
+ uint64_t u59 = load64_le(b1 + 224U);
+ ws0[28U] = u59;
+ uint64_t u60 = load64_le(b1 + 232U);
+ ws0[29U] = u60;
+ uint64_t u61 = load64_le(b1 + 240U);
+ ws0[30U] = u61;
+ uint64_t u62 = load64_le(b1 + 248U);
+ ws0[31U] = u62;
+ for (uint32_t i = 0U; i < 25U; i++)
+ {
+ state[i] = state[i] ^ ws0[i];
+ }
+ for (uint32_t i0 = 0U; i0 < 24U; i0++)
+ {
+ uint64_t _C[5U] = { 0U };
+ KRML_MAYBE_FOR5(i,
+ 0U,
+ 5U,
+ 1U,
+ _C[i] = state[i + 0U] ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U]))););
+ KRML_MAYBE_FOR5(i1,
+ 0U,
+ 5U,
+ 1U,
+ uint64_t uu____0 = _C[(i1 + 1U) % 5U];
+ uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
+ KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i1 + 5U * i] = state[i1 + 5U * i] ^ _D;););
+ uint64_t x = state[1U];
+ uint64_t current = x;
+ for (uint32_t i = 0U; i < 24U; i++)
+ {
+ uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
+ uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
+ uint64_t temp = state[_Y];
+ uint64_t uu____1 = current;
+ state[_Y] = uu____1 << r | uu____1 >> (64U - r);
+ current = temp;
+ }
+ KRML_MAYBE_FOR5(i,
+ 0U,
+ 5U,
+ 1U,
+ uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]);
+ uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]);
+ uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]);
+ uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]);
+ uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]);
+ state[0U + 5U * i] = v0;
+ state[1U + 5U * i] = v1;
+ state[2U + 5U * i] = v2;
+ state[3U + 5U * i] = v3;
+ state[4U + 5U * i] = v4;);
+ uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0];
+ state[0U] = state[0U] ^ c;
+ }
+}
+
+/**
+Squeeze a hash state to output buffer
+
+ This function is intended to receive a hash state and output buffer.
+ It produces an output whose length is a multiple of 168 bytes (the SHAKE128 block size);
+ any additional bytes of a final partial block are ignored.
+ + The argument `state` (IN) points to hash state, i.e., uint64_t[25] + The argument `output` (OUT) points to `outputByteLen` bytes of valid memory, + i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_shake128_squeeze_nblocks( + uint64_t *state, + uint8_t *output, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, state, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + uint8_t *b0 = output; + memcpy(b0 + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + state[0U] = state[0U] ^ c; + } + } } diff --git a/src/msvc/Hacl_Hash_SHA3_Simd256.c b/src/msvc/Hacl_Hash_SHA3_Simd256.c new file mode 100644 index 00000000..b14b01eb --- /dev/null +++ b/src/msvc/Hacl_Hash_SHA3_Simd256.c @@ -0,0 +1,11396 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
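The five functions above form the new scalar streaming SHAKE128 API: allocate a 25-word state, feed all full 168-byte blocks to absorb_nblocks, feed the same buffer and total length to absorb_final to pick up the trailing partial block and padding, then squeeze whole blocks. A usage sketch under exactly those documented semantics; shake128_stream is illustrative and assumes outLen is a multiple of 168, since squeeze_nblocks emits whole blocks only:

  #include <stdint.h>
  #include "Hacl_Hash_SHA3.h"

  /* Sketch: SHAKE128 of msg, producing outLen bytes (outLen % 168 == 0). */
  static void shake128_stream(uint8_t *out, uint32_t outLen,
                              uint8_t *msg, uint32_t msgLen)
  {
    uint64_t *st = Hacl_Hash_SHA3_state_malloc();
    /* Absorbs the msgLen / 168 full blocks; the trailing bytes are ignored... */
    Hacl_Hash_SHA3_shake128_absorb_nblocks(st, msg, msgLen);
    /* ...and are picked up here, together with the 0x1F/0x80 padding. */
    Hacl_Hash_SHA3_shake128_absorb_final(st, msg, msgLen);
    Hacl_Hash_SHA3_shake128_squeeze_nblocks(st, out, outLen);
    Hacl_Hash_SHA3_state_free(st);
  }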
+ */ + + +#include "Hacl_Hash_SHA3_Simd256.h" + +#include "internal/Hacl_Hash_SHA3.h" + +void +Hacl_Hash_SHA3_Simd256_shake128( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 168U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + 
ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + 
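/* The interleave_low64/high64 followed by interleave_low128/high128 steps
   implement what amounts to a 4x4 transpose of 64-bit lanes: on entry,
   ws[4*k + j] holds four consecutive state words of input j; after the
   write-back below, ws[i] holds word i of all four inputs, one per 64-bit
   lane, so the four Keccak states advance in lockstep at one vector
   operation per scalar operation. */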
Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = 
ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = 
v4;);
+      uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
+      Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U];
+      s[0U] = Lib_IntVector_Intrinsics_vec256_xor(uu____16, Lib_IntVector_Intrinsics_vec256_load64(c));
+    }
+  }
+  uint8_t b00[256U] = { 0U };
+  uint8_t b10[256U] = { 0U };
+  uint8_t b20[256U] = { 0U };
+  uint8_t b30[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+  uint32_t rem = inputByteLen % rateInBytes1;
+  uint8_t *b31 = ib.snd.snd.snd;
+  uint8_t *b21 = ib.snd.snd.fst;
+  uint8_t *b11 = ib.snd.fst;
+  uint8_t *b01 = ib.fst;
+  uint8_t *bl3 = b_.snd.snd.snd;
+  uint8_t *bl2 = b_.snd.snd.fst;
+  uint8_t *bl1 = b_.snd.fst;
+  uint8_t *bl0 = b_.fst;
+  memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t));
+  uint8_t *b32 = b_.snd.snd.snd;
+  uint8_t *b22 = b_.snd.snd.fst;
+  uint8_t *b12 = b_.snd.fst;
+  uint8_t *b02 = b_.fst;
+  b02[inputByteLen % rateInBytes1] = 0x1FU;
+  b12[inputByteLen % rateInBytes1] = 0x1FU;
+  b22[inputByteLen % rateInBytes1] = 0x1FU;
+  b32[inputByteLen % rateInBytes1] = 0x1FU;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b33 = b_.snd.snd.snd;
+  uint8_t *b23 = b_.snd.snd.fst;
+  uint8_t *b13 = b_.snd.fst;
+  uint8_t *b03 = b_.fst;
+  ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03);
+  ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13);
+  ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23);
+  ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33);
+  ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U);
+  ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U);
+  ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U);
+  ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U);
+  ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U);
+  ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U);
+  ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U);
+  ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U);
+  ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U);
+  ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U);
+  ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U);
+  ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U);
+  ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U);
+  ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U);
+  ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U);
+  ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U);
+  ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U);
+  ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U);
+  ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U);
+  ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U);
+  ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U);
+  ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U);
+  ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U);
+  ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U);
+  ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U);
+  ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U);
+
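+  /* Final partial block: the `rem` leftover input bytes of each of the four
+     streams are copied into zeroed rate-sized buffers, and the SHAKE
+     suffix/padding byte 0x1F is written immediately after them.  The 256-bit
+     loads here (and the interleaves that follow) transpose the four padded
+     blocks so that each ws32[i] holds one 64-bit lane from all four parallel
+     states before it is XORed into s. */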
ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + 
Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 
= Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = 
ws280;
+  ws32[29U] = ws290;
+  ws32[30U] = ws300;
+  ws32[31U] = ws310;
+  for (uint32_t i = 0U; i < 25U; i++)
+  {
+    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]);
+  }
+  uint8_t b04[256U] = { 0U };
+  uint8_t b14[256U] = { 0U };
+  uint8_t b24[256U] = { 0U };
+  uint8_t b34[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } };
+  uint8_t *b35 = b.snd.snd.snd;
+  uint8_t *b25 = b.snd.snd.fst;
+  uint8_t *b15 = b.snd.fst;
+  uint8_t *b05 = b.fst;
+  b05[rateInBytes1 - 1U] = 0x80U;
+  b15[rateInBytes1 - 1U] = 0x80U;
+  b25[rateInBytes1 - 1U] = 0x80U;
+  b35[rateInBytes1 - 1U] = 0x80U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b3 = b.snd.snd.snd;
+  uint8_t *b26 = b.snd.snd.fst;
+  uint8_t *b16 = b.snd.fst;
+  uint8_t *b06 = b.fst;
+  ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06);
+  ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16);
+  ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26);
+  ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+  ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U);
+  ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U);
+  ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U);
+  ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+  ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U);
+  ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U);
+  ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U);
+  ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+  ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U);
+  ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U);
+  ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U);
+  ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
+  ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U);
+  ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U);
+  ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U);
+  ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
+  ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U);
+  ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U);
+  ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U);
+  ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
+  ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U);
+  ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U);
+  ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U);
+  ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
+  ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U);
+  ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U);
+  ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U);
+  ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U);
+  Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U];
+  Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U];
+  Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U];
+  Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U];
+  Lib_IntVector_Intrinsics_vec256 v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256 v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256 v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38);
+
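+  /* The 0x80 written at rateInBytes1 - 1 completes Keccak's pad10*1 rule:
+     the 0x1F byte above carried the SHAKE domain-separation bits plus the
+     first padding bit, and this second zeroed buffer contributes the final
+     one.  Note there is no permutation between the two XOR-absorbs, so the
+     state absorbs data || 0x1F .. 0x80 as a single rate-sized block. */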
Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + 
Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = 
ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 
5U];
+      Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U];
+      _C[i] =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____17,
+          Lib_IntVector_Intrinsics_vec256_xor(uu____18,
+            Lib_IntVector_Intrinsics_vec256_xor(uu____19,
+              Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U])))););
+    KRML_MAYBE_FOR5(i1,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U];
+      Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U];
+      Lib_IntVector_Intrinsics_vec256 _D =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____20,
+          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, 1U),
+            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U)));
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D);););
+    Lib_IntVector_Intrinsics_vec256 x = s[1U];
+    Lib_IntVector_Intrinsics_vec256 current = x;
+    for (uint32_t i = 0U; i < 24U; i++)
+    {
+      uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
+      uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
+      Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
+      Lib_IntVector_Intrinsics_vec256 uu____22 = current;
+      s[_Y] = Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r));
+      current = temp;
+    }
+    KRML_MAYBE_FOR5(i,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v0 = Lib_IntVector_Intrinsics_vec256_xor(uu____23, Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v1 = Lib_IntVector_Intrinsics_vec256_xor(uu____25, Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v2 = Lib_IntVector_Intrinsics_vec256_xor(uu____27, Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v3 = Lib_IntVector_Intrinsics_vec256_xor(uu____29, Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v4 = Lib_IntVector_Intrinsics_vec256_xor(uu____31, Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i]));
+      s[0U + 5U * i] = v0;
+      s[1U + 5U * i] = v1;
+      s[2U + 5U * i] = v2;
+      s[3U + 5U * i] = v3;
+      s[4U + 5U * i] = v4;);
+    uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0];
+    Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U];
+    s[0U] = Lib_IntVector_Intrinsics_vec256_xor(uu____33, Lib_IntVector_Intrinsics_vec256_load64(c));
+  }
+  for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++)
+  {
+    uint8_t hbuf[1024U] = { 0U };
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    memcpy(ws, s, 25U * sizeof
(Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + 
Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + 
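+    /* Squeeze step: once the state copy is untransposed, each 256-byte
+       quarter of hbuf holds the next rate-sized block of one output stream;
+       the memcpy calls below distribute those quarters to the four output
+       buffers, and one Keccak-f[1600] permutation (theta, rho/pi, chi, iota
+       applied to all four states at once) then refreshes the state. */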
ws[30U] = ws27;
+    ws[31U] = ws31;
+    for (uint32_t i = 0U; i < 32U; i++)
+    {
+      Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+    }
+    uint8_t *b36 = rb.snd.snd.snd;
+    uint8_t *b2 = rb.snd.snd.fst;
+    uint8_t *b1 = rb.snd.fst;
+    uint8_t *b0 = rb.fst;
+    memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t));
+    memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t));
+    memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t));
+    memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t));
+    for (uint32_t i1 = 0U; i1 < 24U; i1++)
+    {
+      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U];
+        Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U];
+        Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U];
+        _C[i] =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____34,
+            Lib_IntVector_Intrinsics_vec256_xor(uu____35,
+              Lib_IntVector_Intrinsics_vec256_xor(uu____36,
+                Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U])))););
+      KRML_MAYBE_FOR5(i2,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U];
+        Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U];
+        Lib_IntVector_Intrinsics_vec256 _D =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____37,
+            Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, 1U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U)));
+        KRML_MAYBE_FOR5(i,
+          0U,
+          5U,
+          1U,
+          s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D);););
+      Lib_IntVector_Intrinsics_vec256 x = s[1U];
+      Lib_IntVector_Intrinsics_vec256 current = x;
+      for (uint32_t i = 0U; i < 24U; i++)
+      {
+        uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
+        uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
+        Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
+        Lib_IntVector_Intrinsics_vec256 uu____39 = current;
+        s[_Y] = Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, r), Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r));
+        current = temp;
+      }
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v023 = Lib_IntVector_Intrinsics_vec256_xor(uu____40, Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v123 = Lib_IntVector_Intrinsics_vec256_xor(uu____42, Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v223 = Lib_IntVector_Intrinsics_vec256_xor(uu____44, Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v323 = Lib_IntVector_Intrinsics_vec256_xor(uu____46, Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v4 = Lib_IntVector_Intrinsics_vec256_xor(uu____48, Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i]));
+        s[0U + 5U * i] = v023;
+        s[1U + 5U * i] = v123;
+        s[2U + 5U * i] = v223;
+        s[3U + 5U * i] = v323;
+        s[4U + 5U * i] = v4;);
+      uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
+      Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U];
+      s[0U] = Lib_IntVector_Intrinsics_vec256_xor(uu____50, Lib_IntVector_Intrinsics_vec256_load64(c));
+    }
+  }
+  uint32_t remOut = outputByteLen % rateInBytes1;
+  uint8_t hbuf[1024U] = { 0U };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+  memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 v016 = ws[0U];
+  Lib_IntVector_Intrinsics_vec256 v116 = ws[1U];
+  Lib_IntVector_Intrinsics_vec256 v216 = ws[2U];
+  Lib_IntVector_Intrinsics_vec256 v316 = ws[3U];
+  Lib_IntVector_Intrinsics_vec256 v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116);
+  Lib_IntVector_Intrinsics_vec256 v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116);
+  Lib_IntVector_Intrinsics_vec256 v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316);
+  Lib_IntVector_Intrinsics_vec256 v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316);
+  Lib_IntVector_Intrinsics_vec256 v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15);
+  Lib_IntVector_Intrinsics_vec256 v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15);
+  Lib_IntVector_Intrinsics_vec256 v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15);
+  Lib_IntVector_Intrinsics_vec256 v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15);
+  Lib_IntVector_Intrinsics_vec256 ws0 = v0__15;
+  Lib_IntVector_Intrinsics_vec256 ws1 = v2__15;
+  Lib_IntVector_Intrinsics_vec256 ws2 = v1__15;
+  Lib_IntVector_Intrinsics_vec256 ws3 = v3__15;
+  Lib_IntVector_Intrinsics_vec256 v017 = ws[4U];
+  Lib_IntVector_Intrinsics_vec256 v117 = ws[5U];
+  Lib_IntVector_Intrinsics_vec256 v217 = ws[6U];
+  Lib_IntVector_Intrinsics_vec256 v317 = ws[7U];
+  Lib_IntVector_Intrinsics_vec256 v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117);
+  Lib_IntVector_Intrinsics_vec256 v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117);
+  Lib_IntVector_Intrinsics_vec256 v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317);
+  Lib_IntVector_Intrinsics_vec256 v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317);
+  Lib_IntVector_Intrinsics_vec256 v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16);
+  Lib_IntVector_Intrinsics_vec256 v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16);
+  Lib_IntVector_Intrinsics_vec256 v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16);
+  Lib_IntVector_Intrinsics_vec256 v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16);
+  Lib_IntVector_Intrinsics_vec256 ws4 = v0__16;
+  Lib_IntVector_Intrinsics_vec256 ws5 = v2__16;
+  Lib_IntVector_Intrinsics_vec256 ws6 = v1__16;
+  Lib_IntVector_Intrinsics_vec256 ws7 = v3__16;
+  Lib_IntVector_Intrinsics_vec256 v018 = ws[8U];
+  Lib_IntVector_Intrinsics_vec256 v118 = ws[9U];
+  Lib_IntVector_Intrinsics_vec256 v218 = ws[10U];
+
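+  /* Remaining output: the last outputByteLen % rateInBytes1 bytes of each
+     stream come from one final untransposed state copy.  As a rough scalar
+     reference for a single (non-vectorized) instance, this step corresponds
+     to:
+
+       for (uint32_t i = 0U; i < remOut; i++)
+         out[outputByteLen - remOut + i] = ((uint8_t *)s_scalar)[i];
+
+     where s_scalar would be the 25-lane uint64_t state of that one instance
+     (illustrative only; s_scalar is not a name used by this code). */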
Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + 
Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, 
v3_22);
+  Lib_IntVector_Intrinsics_vec256 v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+  ws[0U] = ws0;
+  ws[1U] = ws4;
+  ws[2U] = ws8;
+  ws[3U] = ws12;
+  ws[4U] = ws16;
+  ws[5U] = ws20;
+  ws[6U] = ws24;
+  ws[7U] = ws28;
+  ws[8U] = ws1;
+  ws[9U] = ws5;
+  ws[10U] = ws9;
+  ws[11U] = ws13;
+  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+void
+Hacl_Hash_SHA3_Simd256_shake256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  uint32_t rateInBytes1 = 136U;
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t *b3 = b_.snd.snd.snd;
+    uint8_t *b2 = b_.snd.snd.fst;
+    uint8_t *b1 = b_.snd.fst;
+    uint8_t *b0 = b_.fst;
+    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
+    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
+    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
+    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
+    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
+    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U);
+    ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+    ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U);
+    ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U);
+    ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U);
+    ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+    ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U);
+    ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U);
+    ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U);
+    ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
+    ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U);
+    ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U);
+    ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U);
+    ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
+    ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U);
+    ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U);
+    ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U);
+    ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
+    ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U);
+    ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U);
+    ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U);
+    ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
+    ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U);
+    ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U);
+    ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U);
+    ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U);
+    Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
+    Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
+    Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
+    Lib_IntVector_Intrinsics_vec256 v30 = ws[3U];
+    Lib_IntVector_Intrinsics_vec256 v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10);
+    Lib_IntVector_Intrinsics_vec256 v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10);
+    Lib_IntVector_Intrinsics_vec256 v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30);
+    Lib_IntVector_Intrinsics_vec256 v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30);
+    Lib_IntVector_Intrinsics_vec256 v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_);
+    Lib_IntVector_Intrinsics_vec256 v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_);
+    Lib_IntVector_Intrinsics_vec256 v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_);
+    Lib_IntVector_Intrinsics_vec256 v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_);
+    Lib_IntVector_Intrinsics_vec256 ws0 = v0__;
+    Lib_IntVector_Intrinsics_vec256 ws1 = v2__;
+    Lib_IntVector_Intrinsics_vec256 ws2 = v1__;
+    Lib_IntVector_Intrinsics_vec256 ws3 = v3__;
+    Lib_IntVector_Intrinsics_vec256 v01 = ws[4U];
+    Lib_IntVector_Intrinsics_vec256 v11 = ws[5U];
+    Lib_IntVector_Intrinsics_vec256 v21 = ws[6U];
+    Lib_IntVector_Intrinsics_vec256 v31 = ws[7U];
+    Lib_IntVector_Intrinsics_vec256 v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11);
+    Lib_IntVector_Intrinsics_vec256 v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11);
+    Lib_IntVector_Intrinsics_vec256 v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31);
+    Lib_IntVector_Intrinsics_vec256 v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31);
+    Lib_IntVector_Intrinsics_vec256 v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0);
+    Lib_IntVector_Intrinsics_vec256 v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0);
+    Lib_IntVector_Intrinsics_vec256 v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0);
+    Lib_IntVector_Intrinsics_vec256 v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0);
+    Lib_IntVector_Intrinsics_vec256 ws4 = v0__0;
+    Lib_IntVector_Intrinsics_vec256 ws5 = v2__0;
+    Lib_IntVector_Intrinsics_vec256 ws6 = v1__0;
+    Lib_IntVector_Intrinsics_vec256 ws7 = v3__0;
+    Lib_IntVector_Intrinsics_vec256 v02 = ws[8U];
+    Lib_IntVector_Intrinsics_vec256 v12 = ws[9U];
+    Lib_IntVector_Intrinsics_vec256 v22 = ws[10U];
+    Lib_IntVector_Intrinsics_vec256 v32 = ws[11U];
+    Lib_IntVector_Intrinsics_vec256 v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
+    Lib_IntVector_Intrinsics_vec256 v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
+    Lib_IntVector_Intrinsics_vec256 v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
+    Lib_IntVector_Intrinsics_vec256 v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
+    Lib_IntVector_Intrinsics_vec256 v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
+    Lib_IntVector_Intrinsics_vec256 v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
+    Lib_IntVector_Intrinsics_vec256 v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
+    Lib_IntVector_Intrinsics_vec256 v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
+    Lib_IntVector_Intrinsics_vec256 ws8 = v0__1;
+    Lib_IntVector_Intrinsics_vec256 ws9 = v2__1;
+    Lib_IntVector_Intrinsics_vec256 ws10 = v1__1;
+    Lib_IntVector_Intrinsics_vec256 ws11 = v3__1;
+    Lib_IntVector_Intrinsics_vec256 v03 = ws[12U];
+    Lib_IntVector_Intrinsics_vec256 v13 = ws[13U];
+    Lib_IntVector_Intrinsics_vec256 v23 = ws[14U];
+    Lib_IntVector_Intrinsics_vec256 v33 = ws[15U];
+    Lib_IntVector_Intrinsics_vec256 v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
+    Lib_IntVector_Intrinsics_vec256 v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
+    Lib_IntVector_Intrinsics_vec256 v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
+    Lib_IntVector_Intrinsics_vec256 v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
+    Lib_IntVector_Intrinsics_vec256 v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
+    Lib_IntVector_Intrinsics_vec256 v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
+    Lib_IntVector_Intrinsics_vec256 v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
+    Lib_IntVector_Intrinsics_vec256 v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
+    Lib_IntVector_Intrinsics_vec256 ws12 = v0__2;
+    Lib_IntVector_Intrinsics_vec256 ws13 = v2__2;
+    Lib_IntVector_Intrinsics_vec256 ws14 = v1__2;
+    Lib_IntVector_Intrinsics_vec256 ws15 = v3__2;
+    Lib_IntVector_Intrinsics_vec256 v04 = ws[16U];
+    Lib_IntVector_Intrinsics_vec256 v14 = ws[17U];
+    Lib_IntVector_Intrinsics_vec256 v24 = ws[18U];
+    Lib_IntVector_Intrinsics_vec256 v34 = ws[19U];
+    Lib_IntVector_Intrinsics_vec256 v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
+    Lib_IntVector_Intrinsics_vec256 v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
+    Lib_IntVector_Intrinsics_vec256 v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
+
Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + 
Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x1FU; + b12[inputByteLen % rateInBytes1] = 0x1FU; + b22[inputByteLen % rateInBytes1] = 0x1FU; + b32[inputByteLen % rateInBytes1] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 
64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b35[rateInBytes1 - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); 
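+ /* After these 32-byte lane loads, the interleave network below performs a
+    4x4 transpose of 64-bit words, so that each ws34[i] ends up holding
+    Keccak state word i from all four input streams, one per vector lane. */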
+ ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + 
Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + 
Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); 
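+ /* Words 28..31 are the final 4x4 transpose group; after the writeback
+    below, the s[i] ^= ws34[i] loop absorbs the padded last block into the
+    25-word sponge state. */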
+ Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; 
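+ /* chi: v0..v4 each compute x ^ (~y & z) along a row of the 5x5 state;
+    all five values are formed before the writeback so every lane reads
+    the row's original contents. */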
+ Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + 
Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = 
v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + 
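+ /* Note: the interleave_low64/high64 pairs above and the interleave_low128/high128
+    pairs below together form a 4x4 transpose of 64-bit lanes, so each result vector
+    ends up holding four consecutive state words belonging to a single output lane. */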
Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_224( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 
s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 144U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + 
v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = 
ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + 
Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t 
*bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, 
v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + 
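+ /* Same 4x4 lane transpose as for the earlier word groups: the 64-bit and 128-bit
+    interleaves that follow regroup these four loads so that lane j of every state
+    word is taken from input buffer j. */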
Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b35[rateInBytes1 - 1U] = 0x80U; + 
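+ /* Keccak pad10*1 padding: the 0x06 byte written at the message boundary above
+    carries the SHA3 domain-separation bits plus the first padding bit; the 0x80
+    bytes just set mark the final padding bit in the last byte of the rate block,
+    applied once per parallel lane. */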
KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + 
Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, 
v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + 
Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + 
Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, 
v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + 
Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 
_C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 28U % 
rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, 
v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + 
ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 28U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 28U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 28U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 
128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + 
Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + 
Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + 
Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + 
Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + 
Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + 
Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b35[rateInBytes1 - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + 
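+  /* ws34 carries the second padding block: all zero except byte
+     rateInBytes1 - 1 of each lane buffer, set to 0x80 above -- the closing
+     bit of Keccak's pad10*1 padding. It goes through the same 4x4
+     transpose before being absorbed. */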
Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + 
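+  /* Once ws34 is written back in transposed order, the padded block is
+     xored into the 25-word state and the 24 rounds of Keccak-f[1600]
+     follow: theta (_C, _D), rho-pi (keccak_piln, keccak_rotc), chi, and
+     iota (keccak_rndc), all computed on four states at once. */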
ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + 
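+    /* Squeeze step: the state is transposed back to per-instance order;
+       the permuted write-back below (ws0, ws4, ws8, ...) groups words by
+       instance, so hbuf bytes 0..255 belong to output0, 256..511 to
+       output1, and so on. Since the 32-byte digest fits within one
+       rate-sized block here, 32U / rateInBytes1 is 0 and this loop body
+       never actually runs; only the remOut tail below is used. */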
Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 
ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + 
Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 32U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 
= Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + 
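+  /* Tail of the squeeze: one more transpose-and-store of the state, then
+     the last remOut = 32 % rateInBytes1 digest bytes are copied to offset
+     32 - remOut of each of the four outputs. */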
Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+  Lib_IntVector_Intrinsics_vec256
+  v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+  Lib_IntVector_Intrinsics_vec256
+  v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+  Lib_IntVector_Intrinsics_vec256
+  v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+  Lib_IntVector_Intrinsics_vec256
+  v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22);
+  Lib_IntVector_Intrinsics_vec256
+  v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22);
+  Lib_IntVector_Intrinsics_vec256
+  v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256
+  v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+  ws[0U] = ws0;
+  ws[1U] = ws4;
+  ws[2U] = ws8;
+  ws[3U] = ws12;
+  ws[4U] = ws16;
+  ws[5U] = ws20;
+  ws[6U] = ws24;
+  ws[7U] = ws28;
+  ws[8U] = ws1;
+  ws[9U] = ws5;
+  ws[10U] = ws9;
+  ws[11U] = ws13;
+  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + 32U - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + 32U - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + 32U - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + 32U - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_384(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  uint32_t rateInBytes1 = 104U;
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof
(uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + 
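+    /* Illustrative usage of the 4-way API above -- an editorial sketch, not
+       part of the generated code. It assumes a target with 256-bit vector
+       support (e.g. AVX2 on x86_64) and the Hacl_Hash_SHA3_Simd256.h
+       header; msg0..msg3 and msg_len are hypothetical caller-supplied
+       names, and all four messages must be msg_len bytes long:
+
+         #include "Hacl_Hash_SHA3_Simd256.h"
+
+         uint8_t d0[48U], d1[48U], d2[48U], d3[48U];
+         Hacl_Hash_SHA3_Simd256_sha3_384(d0, d1, d2, d3,
+           msg0, msg1, msg2, msg3, msg_len);
+
+       Each dN then holds the 48-byte SHA3-384 digest of msgN. */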
Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + 
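+    /* SHA3-384 absorbs at rate 104 = (1600 - 2 * 384) / 8 bytes. Each full
+       block is staged through a zero-initialized 256-byte buffer per input
+       so the eight 32-byte vector loads above never read past the end of
+       the caller's data. */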
Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = 
current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + 
Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + 
Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b35[rateInBytes1 - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, 
v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + 
uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + 
Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + 
Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + 
Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 48U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + 
Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, 
v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + 
Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 48U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 48U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 48U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_512( 
+ uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 72U; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + 
Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = 
Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); 
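+/* Each interleave_low64/high64 pair followed by interleave_low128/high128
+   transposes a 4x4 block of 64-bit lanes: four consecutive state words of
+   one input become the same word position across all four inputs. */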
+ Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + 
v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) 
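+/* Absorb the padded remainder block: XOR the transposed block into the 25
+   state lanes. The 0x06 domain-separation suffix was placed at the message
+   end above; bytes beyond the 72-byte rate are zero in the 256-byte staging
+   buffers, so lanes past the rate are unchanged. The 0x80 bit at the last
+   rate byte is absorbed separately below. */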
+ { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b35[rateInBytes1 - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + 
Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + 
Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = 
ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + 
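+/* Squeeze: transpose the state back to per-instance order; after the stores
+   below, hbuf holds the serialized state of instance 0 at offset 0, instance
+   1 at 256, instance 2 at 512, and instance 3 at 768. */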
Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + 
Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for 
(uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U 
* i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 64U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + 
Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + 
Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+  ws[0U] = ws0;
+  ws[1U] = ws4;
+  ws[2U] = ws8;
+  ws[3U] = ws12;
+  ws[4U] = ws16;
+  ws[5U] = ws20;
+  ws[6U] = ws24;
+  ws[7U] = ws28;
+  ws[8U] = ws1;
+  ws[9U] = ws5;
+  ws[10U] = ws9;
+  ws[11U] = ws13;
+  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + 64U - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + 64U - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + 64U - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + 64U - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+/**
+Allocate a quadruple state buffer (200 bytes for each state)
+*/
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void)
+{
+  Lib_IntVector_Intrinsics_vec256
+  *buf =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 25U);
+  memset(buf, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  return buf;
+}
+
+/**
+Free a quadruple state buffer
+*/
+void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s)
+{
+  KRML_ALIGNED_FREE(s);
+}
+
+/**
+Absorb a number of full blocks of 4 input buffers and write the output states
+
+  This function is intended to receive a quadruple hash state and 4 input buffers.
+  It processes inputs that are a multiple of 168 bytes (the SHAKE128 block size);
+  any additional bytes of a final partial block in each buffer are ignored.
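+
+  As a rough usage sketch (the names `st`, `in0..in3`, and `len` below are
+  illustrative placeholders, not part of this API), absorbing four equal-length
+  inputs could look like:
+
+    Lib_IntVector_Intrinsics_vec256 *st = Hacl_Hash_SHA3_Simd256_state_malloc();
+    Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(st, in0, in1, in2, in3, len);
+    Hacl_Hash_SHA3_Simd256_shake128_absorb_final(st, in0, in1, in2, in3, len);
+    ...squeeze the four outputs...
+    Hacl_Hash_SHA3_Simd256_state_free(st);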
+ + The argument `state` (IN/OUT) points to quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b01 = input0; + uint8_t *b11 = input1; + uint8_t *b21 = input2; + uint8_t *b31 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * 168U, 168U * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + 
Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = 
v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, 
v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + 
Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U];
+      Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U];
+      _C[i] =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____0,
+          Lib_IntVector_Intrinsics_vec256_xor(uu____1,
+            Lib_IntVector_Intrinsics_vec256_xor(uu____2,
+              Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U])))););
+    KRML_MAYBE_FOR5(i2,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U];
+      Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U];
+      Lib_IntVector_Intrinsics_vec256
+      _D =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____3,
+          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4,
+            1U),
+            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U)));
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D);););
+    Lib_IntVector_Intrinsics_vec256 x = state[1U];
+    Lib_IntVector_Intrinsics_vec256 current = x;
+    for (uint32_t i = 0U; i < 24U; i++)
+    {
+      uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
+      uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
+      Lib_IntVector_Intrinsics_vec256 temp = state[_Y];
+      Lib_IntVector_Intrinsics_vec256 uu____5 = current;
+      state[_Y] =
+        Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5,
+          r),
+          Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r));
+      current = temp;
+    }
+    KRML_MAYBE_FOR5(i,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256
+      uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256
+      v07 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____6,
+          Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256
+      uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256
+      v17 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____8,
+          Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256
+      uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256
+      v27 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____10,
+          Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256
+      uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256
+      v37 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____12,
+          Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256
+      uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256
+      v4 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____14,
+          Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i]));
+      state[0U + 5U * i] = v07;
+      state[1U + 5U * i] = v17;
+      state[2U + 5U * i] = v27;
+      state[3U + 5U * i] = v37;
+      state[4U + 5U * i] = v4;);
+    uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
+    Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U];
+    state[0U] =
+      Lib_IntVector_Intrinsics_vec256_xor(uu____16,
+        Lib_IntVector_Intrinsics_vec256_load64(c));
+    }
+  }
+}
+
+/**
+Absorb the final partial blocks of 4 input buffers and write the output states
+
+  This function is intended to receive a quadruple hash state and 4 input buffers.
+  It processes the sequence of bytes at the end of each input buffer that is
+  shorter than 168 bytes (the SHAKE128 block size); any bytes of full blocks at
+  the start of the input buffers are ignored.
+
+  The argument `state` (IN/OUT) points to quadruple hash state,
+  i.e., Lib_IntVector_Intrinsics_vec256[25]
+  The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes
+  of valid memory for each buffer, i.e., uint8_t[inputByteLen]
+
+  Note: The full size of the input buffers must be passed in `inputByteLen`,
+  including the full-block bytes at the start of each input buffer that are ignored.
+*/
+void
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
+  Lib_IntVector_Intrinsics_vec256 *state,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  uint8_t b00[256U] = { 0U };
+  uint8_t b10[256U] = { 0U };
+  uint8_t b20[256U] = { 0U };
+  uint8_t b30[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+  uint32_t rem = inputByteLen % 168U;
+  uint8_t *b01 = input0;
+  uint8_t *b11 = input1;
+  uint8_t *b21 = input2;
+  uint8_t *b31 = input3;
+  uint8_t *bl3 = b_.snd.snd.snd;
+  uint8_t *bl2 = b_.snd.snd.fst;
+  uint8_t *bl1 = b_.snd.fst;
+  uint8_t *bl0 = b_.fst;
+  memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t));
+  uint8_t *b32 = b_.snd.snd.snd;
+  uint8_t *b22 = b_.snd.snd.fst;
+  uint8_t *b12 = b_.snd.fst;
+  uint8_t *b02 = b_.fst;
+  b02[inputByteLen % 168U] = 0x1FU;
+  b12[inputByteLen % 168U] = 0x1FU;
+  b22[inputByteLen % 168U] = 0x1FU;
+  b32[inputByteLen % 168U] = 0x1FU;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b33 = b_.snd.snd.snd;
+  uint8_t *b23 = b_.snd.snd.fst;
+  uint8_t *b13 = b_.snd.fst;
+  uint8_t *b03 = b_.fst;
+  ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03);
+  ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13);
+  ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23);
+  ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U);
+  ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U);
+  ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U);
+  ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U);
+  ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U);
+  ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U);
+  ws[21U] =
Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws32 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = 
ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws[0U] = ws00; + ws[1U] = 
ws110; + ws[2U] = ws210; + ws[3U] = ws32; + ws[4U] = ws40; + ws[5U] = ws50; + ws[6U] = ws60; + ws[7U] = ws70; + ws[8U] = ws80; + ws[9U] = ws90; + ws[10U] = ws100; + ws[11U] = ws111; + ws[12U] = ws120; + ws[13U] = ws130; + ws[14U] = ws140; + ws[15U] = ws150; + ws[16U] = ws160; + ws[17U] = ws170; + ws[18U] = ws180; + ws[19U] = ws190; + ws[20U] = ws200; + ws[21U] = ws211; + ws[22U] = ws220; + ws[23U] = ws230; + ws[24U] = ws240; + ws[25U] = ws250; + ws[26U] = ws260; + ws[27U] = ws270; + ws[28U] = ws280; + ws[29U] = ws290; + ws[30U] = ws300; + ws[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[167U] = 0x80U; + b15[167U] = 0x80U; + b25[167U] = 0x80U; + b35[167U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws33[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b2 = b.snd.snd.fst; + uint8_t *b1 = b.snd.fst; + uint8_t *b0 = b.fst; + ws33[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws33[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws33[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws33[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws33[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws33[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws33[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws33[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws33[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws33[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws33[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws33[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws33[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws33[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws33[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws33[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws33[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws33[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws33[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws33[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws33[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws33[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws33[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws33[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws33[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws33[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws33[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws33[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws33[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws33[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws33[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws33[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws33[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = 
ws33[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws33[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws33[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws33[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws33[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws33[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws33[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws33[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws33[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws33[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws33[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + 
Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws33[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws33[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws33[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws33[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws33[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws33[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws33[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws33[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws33[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws33[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws33[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws33[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws33[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws33[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws33[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws33[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws33[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws33[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws33[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws33[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws33[0U] = ws0; + ws33[1U] = ws1; + ws33[2U] = ws2; + ws33[3U] = ws3; + ws33[4U] = ws4; + ws33[5U] = ws5; + ws33[6U] = ws6; + ws33[7U] = ws7; + ws33[8U] = ws8; + ws33[9U] = ws9; + ws33[10U] = ws10; + ws33[11U] = ws11; + ws33[12U] = ws12; + ws33[13U] = ws13; + ws33[14U] = ws14; + ws33[15U] = ws15; + ws33[16U] = ws16; + ws33[17U] = ws17; + ws33[18U] = ws18; + ws33[19U] = ws19; + ws33[20U] = ws20; + ws33[21U] = ws21; + ws33[22U] = ws22; + ws33[23U] = ws23; + ws33[24U] = ws24; + ws33[25U] = ws25; + ws33[26U] = ws26; + ws33[27U] = ws27; + ws33[28U] = ws28; + ws33[29U] = ws29; + ws33[30U] = ws30; + ws33[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { 
+ state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws33[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v015; + state[1U + 5U * i] = v115; + state[2U + 5U * i] = v215; + state[3U + 5U * i] = v315; + state[4U 
+ 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } +} + +/** +Squeeze a quadruple hash state into 4 output buffers. + + This function is intended to receive a quadruple hash state and 4 output buffers. + It produces 4 outputs, each a multiple of 168 bytes (the SHAKE128 block size); + any trailing partial block (`outputByteLen % 168` bytes) is ignored. + + The argument `state` (IN) points to a quadruple hash state, + i.e., `Lib_IntVector_Intrinsics_vec256[25]`. + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + of valid memory for each buffer, i.e., `uint8_t[outputByteLen]`. +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; +
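/* Inverse transpose for output: the interleaves regroup the 4-way SIMD lanes so that each output's rate bytes land contiguously in hbuf (256-byte stripes) for the store64_le loop and memcpy below. */ +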
Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + 
Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b0 = output0; + uint8_t *b1 = output1; + uint8_t *b2 = output2; + uint8_t *b3 = output3; + memcpy(b0 + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + memcpy(b1 + i0 * 168U, hbuf + 256U, 168U * sizeof (uint8_t)); + memcpy(b2 + i0 * 168U, hbuf + 512U, 168U * sizeof (uint8_t)); + memcpy(b3 + i0 * 168U, hbuf + 768U, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 
uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v07; + state[1U + 5U * i] = v17; + state[2U + 5U * i] = v27; + state[3U + 5U * i] = v37; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } +} + diff --git a/src/msvc/Hacl_K256_ECDSA.c b/src/msvc/Hacl_K256_ECDSA.c index f9bf31ed..0aaab085 100644 --- a/src/msvc/Hacl_K256_ECDSA.c +++ b/src/msvc/Hacl_K256_ECDSA.c @@ -571,10 +571,6 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f) { return true; } - if (a2 > 0xffffffffffffffffULL) - { - return false; - } if (a1 < 0x5d576e7357a4501dULL) { return true; diff --git a/src/msvc/Hacl_SHA2_Vec128.c b/src/msvc/Hacl_SHA2_Vec128.c index 02af75b1..18f9a73a 100644 --- a/src/msvc/Hacl_SHA2_Vec128.c +++ b/src/msvc/Hacl_SHA2_Vec128.c @@ -42,7 +42,10 @@ static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) +sha224_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec128 *hash +) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -295,7 +298,7 @@ sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) static inline void sha224_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -310,7 +313,7 @@ sha224_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update4(mb, st); } @@ -320,7 +323,7 @@ static inline void sha224_update_last4( uint64_t totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -374,13 +377,13 @@ sha224_update_last4( uint8_t 
*last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha224_update4(last0, hash); if (blocks > 1U) { @@ -390,7 +393,10 @@ sha224_update_last4( } static inline void -sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) +sha224_finish4( + Lib_IntVector_Intrinsics_vec128 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -485,9 +491,9 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha224_init4(st); @@ -503,7 +509,7 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update_last4(len_, rem, lb, st); sha224_finish4(st, rb); @@ -522,7 +528,10 @@ static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) +sha256_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec128 *hash +) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -775,7 +784,7 @@ sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) static inline void sha256_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -790,7 +799,7 @@ sha256_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update4(mb, st); } @@ -800,7 +809,7 @@ static inline void sha256_update_last4( uint64_t totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -854,13 +863,13 @@ sha256_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - 
Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha256_update4(last0, hash); if (blocks > 1U) { @@ -870,7 +879,10 @@ sha256_update_last4( } static inline void -sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) +sha256_finish4( + Lib_IntVector_Intrinsics_vec128 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -965,9 +977,9 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha256_init4(st); @@ -983,7 +995,7 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update_last4(len_, rem, lb, st); sha256_finish4(st, rb); diff --git a/src/msvc/Hacl_SHA2_Vec256.c b/src/msvc/Hacl_SHA2_Vec256.c index c34767f5..4098d4c7 100644 --- a/src/msvc/Hacl_SHA2_Vec256.c +++ b/src/msvc/Hacl_SHA2_Vec256.c @@ -1541,7 +1541,10 @@ static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) +sha384_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec256 *hash +) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -1778,7 +1781,7 @@ sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) static inline void sha384_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -1793,7 +1796,7 @@ sha384_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update4(mb, st); } @@ -1803,7 +1806,7 @@ static inline void sha384_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -1857,13 +1860,13 @@ sha384_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + 
K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha384_update4(last0, hash); if (blocks > 1U) { @@ -1873,7 +1876,10 @@ sha384_update_last4( } static inline void -sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) +sha384_finish4( + Lib_IntVector_Intrinsics_vec256 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -1960,9 +1966,9 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha384_init4(st); @@ -1978,7 +1984,7 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update_last4(len_, rem, lb, st); sha384_finish4(st, rb); @@ -1997,7 +2003,10 @@ static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) +sha512_update4( + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Lib_IntVector_Intrinsics_vec256 *hash +) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -2234,7 +2243,7 @@ sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) static inline void sha512_update_nblocks4( uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -2249,7 +2258,7 @@ sha512_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update4(mb, st); } @@ -2259,7 +2268,7 @@ static inline void sha512_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - Hacl_Hash_SHA2_uint8_4p b, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -2313,13 +2322,13 @@ sha512_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - Hacl_Hash_SHA2_uint8_4p 
last0 = scrut.fst; - Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; sha512_update4(last0, hash); if (blocks > 1U) { @@ -2329,7 +2338,10 @@ sha512_update_last4( } static inline void -sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) +sha512_finish4( + Lib_IntVector_Intrinsics_vec256 *st, + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h +) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -2416,9 +2428,9 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *input3 ) { - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha512_init4(st); @@ -2434,7 +2446,7 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - Hacl_Hash_SHA2_uint8_4p + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update_last4(len_, rem, lb, st); sha512_finish4(st, rb); diff --git a/src/msvc/Lib_RandomBuffer_System.c b/src/msvc/Lib_RandomBuffer_System.c index 0d7924b4..de6ef337 100644 --- a/src/msvc/Lib_RandomBuffer_System.c +++ b/src/msvc/Lib_RandomBuffer_System.c @@ -31,6 +31,7 @@ bool read_random_bytes(uint32_t len, uint8_t *buf) { #include #include #include +#include #include bool read_random_bytes(uint32_t len, uint8_t *buf) { diff --git a/src/wasm/EverCrypt_Hash.wasm b/src/wasm/EverCrypt_Hash.wasm index 101c3f6834aed5d76bde19f1242639e8d080fc39..a5f05d9df8aef198166f523bcfafc86a034516b0 100644 GIT binary patch delta 2618 zcma);e{5UT702Iw_Ir-~p6!<(P8vH-?u*mdFF37#H*FIqPFg7W<(7^~V`Gs4mn2O? 
zmV_uNgqVsJq|rckXay8Twys;HQgC?{Lo0s-A@K(jktRT5MXa##2mio27@9Ojvh&^z zS*lL7|J?gM_uO;d@#md)ub+9?^8UYhH0EYg&xU^vAt5YjjfcyvNc7@At}TwDy%=ujpUsZgSl8THE{pfJ-Nv|d?Yl$f>2H> z;eyaas^GGaBpYz7hb>|`sfHdgO1v;D#)uD!IM%@5L=E=a+~BkM;g_}m{Me?zpKx9f zByl7+G09PQ!4@Mbj`i?Qn?mYAD2b2&q)MWs0S0m02oF1`{b%>*^9S=0t`V!-Eaax9 zxgfO3Rp7E$$RYdAsr-1?_{`X32s(rY5#2FPGyV73J){W^+ndNuP_&0h7(TLxNHdNN zmSi*rbB;zK7E5rO@hJ@g{#Gu6Dnz1&E#eL?W4U6OB<;9RV`qT5j-avb%8lGRaNoXxiAjXc^%)yr(P`uR>kO}L-*`UHWlb#(VwZ5Y zf$aL1Yp>(-+lLCA|AU`nE4iT5?Il*Y(pRnL-EUdN440vb8}c5_83_>U9F3SO3M_b{ zjwp?ywa6)@P*$b6VgVjE#Mn|9d|XFiq^h~BTrHs`%^auNGk^c~)vsrj3OHL;oAh{@ zDNr6$j8H;3rBb@uqP7gD3aUs2DyBG2dK{Lkda;29sHRVDIBX#;kn692%hXdErxr{z zswQZh@-(5eK%%A%yX>=o1}AGOu*-@y+p)_?un8*trKm!F!yXE1aMWLhYQg^ptIwO^ zpfpt&4w?!1J~L!LEF^u7kwkycP#H%@RMB5yuZ<(Wk-d)J5u6YY7mF z&(`#?dXXop{#oEt9^126qeu2l2u5N>M#;W!NxXin}eqP%rl1A7NN#NLLjxDVkoQ=q6KY6YK zR$4v!Cy_3m|3=Uo;}7xVA5hdb>Tf4kZ2Zv@{7qHx=d=P}wk`38ZL8)!!;@*>x_-mg ziE4Pgz0KJ44SLvKy|07kJ(~Iyp-&OxO&=$#ueu5D>8$6^I`qdne?!WG>NK4;{4=ZW z5Tti_`Qvu|jvdo>vPFNl?>U0gSXPzDR{a-uY`5UFbZ6FvQ*FN6u!aT;R?-Un<6-FD zKMKX!1kMNG$=@}Y_S{UPZ>yI&sPo*c>Zg^|Pkq#>Z1bWd%63?sIRr0_Vcfwa&Ssqc z(%1`xwCeriUyu$1W|CW5~_dIJcQd4@ol zU(uDv*ASS?h(MXY4uaiJtRWDOjp`k60|}Nl zon$GOPMQctpT7=*<-!p_2Dx%9^d(uQ(-9lc2t+o zu7~!2(iL88x6Jj*l0-R7-y=Ji*d^PUxJ52u;%3>##7RcrG+?;CgMoBh)IF{$RKbge;CXKxrg-0QN>aGmPT@^* z)rORWFpL{)HCa%#Q@fGf@!eUZN_cEYc!G&g4z-;j1;}%dX)Dt0Wyc WhO`M6;3WL3;P`!~{*%|*O8yJ9{>>u* delta 3225 zcmbtWYitzP8NGMB@16B7#s;(Yns5Bv0b};zUBB%en}^>IUV)IX*)+>@y*8|&5Wsej zortDMYCa$}Y5*xMMXB64nKY@2(p0J{s-$fcRjvAm^bu96+8?Bes#cYN-n(}_qEC?e zW9OdloVjQ2oO|`n{^IY_{4;rOlE{owLI}MyGd)eV%(;i=hTL;Q<~L^Ep_cc~GplDp zzLmKDpXr^%W5>8TLYLtMb9hC8X-6WJbY=D@LXl|v==f8Ki6mK$9hUNQmgTe%3oUDC z5i28f1t;-k#;(L~S%S0}Z&^Zg6?&{8>cuXrN=tCmx|WvWJ!?7MuzE0Mb)(Bxc+TQP zwECrRBfe`5;x?NH?=mH0E2j!xv;}E7{+L}W@V5@IA3mB)KAjAZO18T7=ESk%qzYe= zS7O>;C|BDz9!nnSI&xxs!jD(Yl@=z4NKID%&|XCS_rs-nkHOV?nV!%x?;Yo$37 z43S#qUTF@6>d88RB2iMuke{y4*8H9TzH9R0fK#PG{Gv0AA!h*&n(g=_r;5j%1?S{) zT8|a-8@@=ip|PpCrM0cSBc^qBZP>VJb9YZ~U;n`1(C`TUSss*LT8^4)9c{reSB2CH zIPNOJ*IgD&yL|Rs$n7+2*t0``G=Z2>Dj2e>GRFdygcOWFU(z``MWw z!9~9(H?`)l+nm{?}jLq@;h@{cF>d5`guZNuw#vlsc#c zSz%i$LadONim-P3&f9-E?QtvhQ12&1i5Nr_qJCmh8VsTl8vVqkG#NxQH2aCHv=~Gy zwEBrhY15{bT-%}DPYRU|T};!6W}8qpKt!9Kp0U9O4KkTbtFjRSu#wLR2)}^v3kbiU@CypR zpzsUk{P1w0TCk~tnx*h}OoU?PWzaT3+Jw!?Onz?8DXU><=-RN_~5 z8pJNxrILX1s6p(8-70BR9)ms@Y+zCJ!Egi1tq-;|5Dy!+HIQQHhaFt&hn-yOhex^A z505cTBMX{s;aTNz80^f3`*@f31F|qXJFD!09@xX@^$5=%;n^cRdxdAO@az?yy*W?( zU7_lVS4b@0xbg&xH;gZ~uEh6zE^I1VSFlX6fo&a&%bt6W0z6(+7Fq5Ur5Pl)h7?dB zo}e}>`J|Dyw|boN<8gt zVSVnWm22=5?=psHm5OC01q=mCxH4MuJ=OvJr7`+*9I1}tmC~E67w+@Lq#sJT#UEmA zQ9NfgYmM=;2I|9?*|m&4NY(#Vc0;18+0#`0mck&gN2<5Sfj0a02_+0f3)I-#UlJvYy|12;m(Pxmh7xN9!FNW_~ zrB@v6ooev=EkBbk*cm*{8CP4=k}%e`g9|qqlhuoLZ#$KWYLxR7&Qrd17iezp{1}Lp zOE+YFTkPw!ph`Io$GLtYCs(7R(<^<$saJO%x6_DzrSEH0igEu2<-umXHg0xnOZmqF@j z!cR|~!Kc${9Nr&c<0xK9ROlztS82{8D;=s|lJG=&3|~K0{Xol)Gs5!YjIg|w;g3O@7P744kGL`mp#dK z{3Pq|zuGS0vsY@R+xdF*8n@Ehuiu~bf0)z%T1Vbfy|U8@gn9JH4gogFb^$iZHUT!s zRsp(XivZoSS%A&5Nq|l{PXJAp1c=F0fDXL9FeJU;(PQ7c2}qz4*;zw4g?Dfs2QhZ*-5VdP!H<43xEe-Y40&o_Ok6*Z&uSy&L~r z=cZLDcCd3FhY|i=-8&WJ)4;F^Wo_=Sc`!8FjsrXz0oU_z1gzun2)K?1Bw#I%NWir` zBmoC`Oacz^peS_kKEH-%C1_P9POz~LmPN6G1+4so^`&X{Kaa3a;)}};R+^m7i^yuq h2q*ia`OCLD^A4mB;74zd0Jn_9GS7J_P zdS1Ml2?NW-a|(>ClZ6;H8QCV=Fv>BqPp)Is-h7(Th!M#A$D|7+b(k$BIHVjIG#D5h v4=^|~=7120mnFb5xt4jEI80IiLoP;EQ9waJ0Hy##_hyy}j4Yd7SdEwfMYJnt delta 200 zcmeyy_l<9YyewlqV|@Z+9aBA!1T*0TbAA0pn~upkjGT;&lg$_v7?~!XlV>qDVql&e z&Zx=AGP#jaj*)fZIVDE6$qN{@8QCX(RNQ=;(TI_eb@D$ZT_CB$Y$?Gm<;bAHz~Fd* 
u!I3csgh0G30mjL-%*(`Kk^&fVF|vvR3IYN!1sJ+FvrJ%Q-0Z??!~_7Kr!6Z0 diff --git a/src/wasm/Hacl_Ed25519_PrecompTable.wasm b/src/wasm/Hacl_Ed25519_PrecompTable.wasm index 46a7380db06358dcdef5459264c6b9f1dc726630..c94538f0c5740cc5e275c8a82bc368902b64046d 100644 GIT binary patch delta 103 zcmX@yz<9WUaf19rOQDIXA`|_&Ci=&5C8d_+=cF1NF)&UnlxHzCV_=#*nNg9Ed19gD z#I>A^EEBgYFtSekt*OAq9iCX68(dmYkY7|HfW(e3DM~D0V4s}MsJ(d$W4|2$i_0N6 delta 83 zcmX@yz<9WUaf19rZ7F8v`ud5E5)=JdCq{;FCFW$N=f#_uFfdM*XH;Nhnph~$Vr;~~ mJlTO!laXa|7NZ;^D@e@HjDc-(C!;na`@~y{o3}9b+W`R5?G`8i diff --git a/src/wasm/Hacl_HMAC.wasm b/src/wasm/Hacl_HMAC.wasm index c2e51b851c0e3a3f21be3160b08d2c2ff85f9351..4837783c3caf2566c5fc53b594b01adb3cef5172 100644 GIT binary patch delta 4288 zcmZ`+-BVT96+c_9kw|BX6N8ni`7J>N1V2!e4+Rb%0?L=7sK_^na=9Xk#zfIe5{)s@ zC^=DMqPC5i_)upuuUTgh+d+ojU zT8BUV+sysbEcb?IP8-kj%>DW0&oeTT$@!nw+t=Sa%Haynm8XkU;5pf)+29Nk67zhSKc~mmi+mzul}*Dr1|atKA4;HmMpnC z^=jRkCFa#HyNZ{r@jV~et8=kl@{l9>i22^$Skljy4cHKL_7?0YK!NY=^gL|v&7^F| zsN=xfAFK1dLQWP?iY4MGppZ5SWckXP0@>)l_OKCo^_~uLvU78NZz+dTh!_f|q)1=W zQj8)ZqL|EQV@V3+*%SbI@0Fkgn-sQ$mYYzRnB3`cfl`VR*^JG>*{uBDE!cu0wb_DV zr`?JzvJKm^qzq*YFEinrM4aoB?Th4??7((nN;xuQCo)h@J2jPE48H;uHvCFdy6`J> zSrrVwA`*TjB@Mqa4Br`P`0rFol}25q;UmX4*}u*W=DIKDVmqf3*n>P2zQj&!r-FPn zsIh#tsC9fb%2z|anh0MlCFQFP`J9pRt*@0j>_8pz2>%l0FJ~$3vRj^&>qC6_f%FyZ zs7Jl;RdKs~#rKKtNAT-)n)vmlUc^Z8d+VhE4bq4PKRdNgO~{kIB#h|?uL5-=KmIu+9O4UQ?mcuw~Q?&;@=u(>=9B`?61U+&TN3-M@ zj*&E-s=bTkm>fqhF{KY3(vJ@Gh5IQ3Or1dt+SD1skV~CGUDhBsVvvhXox>0%O`V}| z@12nbzj{c9HC2ags;;rAO4t#MSlCgFI@l3~9U<&U1a_2?!j6Wp&PZX`j>;H%ZL*Is z**jmNAH6QwPvC@w9>=(YKB3Sj2z?>~Jx)oX$3tjmq|ocedBu`3{y^qO$90Qu{ zkW)Bi6MX^`F40dZ_9lsbgGlaK^K26W*h-X5j1iuJ z80-{Ex2XL5vC?e%G4AF=8SA$O|n-h-)%-S@hxYw<7{U6r)GOA zTCvM2TAiYmRlS8rk!|}1A+awi&B}iDGi?^Pos~VrZFlbN(xIA_o#+g4JDs9ax>T{^ z0E>9&a=>bWzl;^H2yMHaZMPhU3mL97(f+L~gh*KPqqpC=%_Gci2ktFkMEq(2Ww_mdG)Bjzv6&D5X6| ztd#N`Q_nG7m_HzFEi%q;fs-2O|5yE!TJ_z>!1%)OjSf&ZK;P8YzyxCo3nBAG;#46% zC6o3&EeT9=%a8~Q5wR!bv=$;v@poDa(Uh8=Mm$xC&R|NWF>Nam3p)5s>+zLvmFOHx z{0wHYk{|$ z5?7n5M3*V)g}oe}fEoo7&&jwfvzo<4S8&C-+gB#8$W`tEW*Oa8=y!sx4oM`Htb&?3 zLPvel^39o$(3$DM#kjQ}1^;Tx4?4=;+8Vco;A0DxiI0tZVsL@X34CP8 zP3Z$%aOBqvt{J5d{?oj)!m(d7N`K9epTl&#*VWdp;CgBWp=sc+dN+36$fvr5)i(@o z7}D8Q+z3~3!^mf5SA(14FqJ`9x)jeKix%izmJf#U8W(bhE@eHER}#Mg_&S$tvi zxgVWnb>q}GI`xZ%E^r0k2J`K!*WEJmox%I~R1?CEKGXuoJO9YwQ7~OmSpC@GF`e~) zokun!9%)8!n9c~AsET*su?fq7H4VD@S4})N^2FeYk>BXwbfx<(Ddl(kldg5&6I7lm z{8JMtT6CnGE?ac6a;k7qVC8xmm{qGPpQ-n=s5f~ir@g-q<&-y-^nRwf+s;)!H+XL3 m2Ln%jG`bNl6!Aqg+#ij~VF9G&f)K`TWqF~9=1GuqKL3Ag=8u^G delta 5458 zcmbtXYj9Q76+Vkx@v2OFs}YbAyH*n74G-mIB4{utJOkzh0rC(oNeBeE31GrAj~l8% z6a>Q|R0S*4s#S;u;{#MQh+43%kIGE_VW<7s={V!B{%dFYt-a4Z_r@82b>`%pwf0); zTWjy^z4mu|zomEoNjEzU?XBcE4jt{k`K6mFL5X#v6NyAm|M}bcKOarh|JQQ5K9om` zLPZmnG&IFyrLEiIv66VhmgbqU`CHrCI~v+L+QpcbG_<$)ruJAYwz0V*wrOW|Lu0%- zw!#~@;l%%iP%>0Ix`sww|7K|D#;J>M`sVrGUT4&(p69&b8wb3@6WfOVRWW%~LUK6U z4Y?F|L-Ea$3niRXCP5jKRPx;K|NHj8yCZkVU3^#6aU>cHSW5yz-1QCiaO(Eb}(=+-y3x%N&!#eXo2J4 zGr7PC;WM?s3FC8jfis5lc)Hs1c&6I&c(&T|xBxcSal;)_$Wgbq_pvY+y4>F0UMlx- z4&SFSITkO+;^kPpT#J`$@p3I*uEFDMnZsjcE{9xaQ7GYMRqq<-4m(`Lb4nZ)#zrPg zaGe_9uGoXfq_Re7^3xWaWbWw}dRq&%Y-g0n1C>^fLju7HOB1y?{xIeFX1 zWZqFBl?q&`8F;Kqk)Ka+BT~gx26j0wx3ED}Yq_l8QUJ$! 
z1<&DmgZvPeTIg!7Hqa}1rG>6m=xRV$r$MiTrqC;WXzQfV*(+rgFXdG{2k?WuY_JPj znOT=xCb%rCd9_zrn>V|LYg}h8Ccj$oR|9`_8h(uq1HWdPljfxOH8oPpwNl5mZbWw~ zyriDz$iqk&Z0a?xBWnS<)(w>=7r9O~^*6u|8*Q^+{#utdd*^<7`@M*ZAug5{SFc(S zx5jA6xESIjrezh^$~s;r4LnyGG0P@^G^Nd_5gKhVnHvJppPO2wNkI~|F1w*fcmg)r zdd#UgeNG#o1P65^lrcINJ;E))f!V~HFeBL<3~b>odQjuRKr6TEL2U~Lw(?dzsM~ZP zVNUyY-mWM0k-(*$+x4J!1Oq#GhaS|O!N4xwr3ZC4Z{*Fofj4qo593B|)5Extx9LG` z;YZZg!X0XB;hk!0;oZSO-IL(W_N4AHCsp=xGw;=yW{cNs@tQ5(28*}B;%%^a8!XW&aOGLh?8< z20P<&fOx$;ffpYz#ps@Xl5oxs5+5Wzjzx!v4*^dz9)!;pn6Sklrn(4RpzEnTME<_^ zUA^;}x8He)B7NjzdK zBHp&y93XTX3_MBNZ&;2IA0zEHMFYeGfYyF<%w#*J*&wE}!9`8THbDN#^j$UErU8=U z#K%c~rSDYlc?QYlS*;-GKF@dNLYDM4v>h<4d0>exBsl#E!f`Scg-Jb}D_cFOcefM}lLZ_%k=UoC4ao zQ7(J`Xw59Ub{1?RZ&Z&Ys0SSANB%N;qOj%ss9obSdSvLkx|tbiOeCDnL{tfwh$=}Y zlBG-}A7?4aM6v`-Bp>}O$waaQOe7!OE6GH%1WY6!%__-6vII;d-{BM!$r3P;d?y+( zkt_id$#AAK(W{oIHmGXH!4@O{`PnhS)gD!d-I1C$MY~)VE zVR}i5ImE?n;_7Fcl(38A!X5qd}-;$^^;GR^>>%HT5X*V-a0Fy%_F z)G5yfBeJuq)UqnE-z!mU^6syKrYljUD}frUlU})Ac2j*CHD}bz!`|hoIk&IjHNZDH*VvqEWUX3e#W==g z#t^fH80g7bta8LG9UysKy4EEP8b^%=w~3pQxJ@>^N!F|7)@EGA7Pr|Nnmt|+&49PJ zbU^`?dHc-!f_Z8eF~3=!-=Y?28kFbix*|ANtJfQ9&6P&F_RpA4$@DEX``%Tf^(1~L+i zfpF|6?U;|J#y~#~U@QU(;`=4Cz&~(}YU2clu)o-{#E=l}ogfm)F~c6TdxAM6$8hmK zjo9ge>pD4R?tAT_xUErfAL+ju!s8IaDm(!pqQXHcOrX1gPCaFuARIAH5c-W11P?;c zRdLVvk?pIvvhlM+`zr2ptlEHqfMD}Luz4UL57J;QZ5jwR4TNHw2a?T$UWhw?*ul-; z1_EHEZGt-)3VPx*cpF{`75~F$N!z0L^z6|&&yw~$FaiYQ!aq*Q-gtsADkpJ^E1yi5 zD;sF)4fz=eN;+{h<0i*#{Q@}UDRWP!bIQ{{7zxR5h%tyX@}lz9-{QqDqA1%jUs6)} zGV#k4xe-ksEYT8~u=ibTT+u7UuaKTp2M#)qmkGNUVru)s1v>T_JoSvET$LnwqaKuk?fWrJL$!mo6z`w&T<=2RncA74DhWHF=A1pdceAdr<#^gPtc_F6q!UekKJ)6wy zyK3IhS&~6-#;n}AuM^`rZhPYEehsga{9cy@WyE<<>wt=tIqg?+j`*Cdo`1>$!{%$DCDp_@l)QbD$IAth zKM>>OyrFx;iZ78~D`?YDl!(#oIlvOgiTi+>JS`9c$Xk^Un9fzVTz4}y$t?kf}q1wR75 zFh3!_)aWnMqmc)CD*AI@Pe#Lt=r6U_GF15$@mC~&A$H_z?O$?T5wE9D_-j(#zt7+q TDE^IRbh)mG^qKczonPV^I-7|l5`dii8kMo~tl$#IMdjLehk z88sPMCZAx`W@MeXL7v6XjDc<921%~OoXqsRcrz0Q_Q@+5<#;$aJra|1;yrvFoy8^> zFo|v!VG?HMV^vgeJiy?{m;*!t%#%%7Gk94QB^(XlBCEh60*(rB5qUO{8V*MhxX2W? 
zE}%h<$Oh@KgA`dJ8MF{AVu)mr7zap=CXzwj963P${NPYzWS=a>d5Dp9@?B18M)u7= zIcu3DSfv~pG#D5ZwlYF22J^B6*f;m_{APs99!8g4$oGj6ZqNn+2PPorqmUTTR1V>H z2z`^#_3?>5L+BF}H)IlrIUpF#{z%DfAe92rqCi8{q&Fe7eU(Adri7-=R(=~J+)QYI z!~Mji2zK>fB?+LJV#>J)hxVg8bhgTSgzQ{&*+ps}LGFPVv{u6z#QCTRaw)skDun%| zXpTFfy$hjjjxJc+13i$oZ+fdhG7Akrob3i15Xu^joDl|!qZxSH7|AwOG|4%p5lm97 zil><&F?^C4O`gv}10??vD*qf^KEP@ZGtf=EcA(Hwv(o^@vA+X|li{!o#Nl@aaWtG2 zLB?ETf>?8f3C)^h*VQ0n_&vdjJ;92XLG4_EZfCOh0+3-#86k!(Mh|#dKShwu_5mQn zA_7)30~LRTD*lY2I0T|N46Hb88c65+2v8zri@XYqT}1-h4Tv||&UsNK1#Evg1N-SVtpPbC7&BMXzk(itl z@8Rp{EH=4-Np!OalQ1(MhoXYx0R~6L93T>4oovdQ!ONj2;b;ICSp^mma8!Vc$g_de za5##Xr`EQF2bSx=nkE&@*W{O7hQId+DDLkAO@|~a0YQc zYJyzKuC)qbe<_;dPH68!Xq%%8*7iUTr0tvDDv-=V0}yAs!3Kn~Mk8m0f#PTe-Znv$%|jEKb6j8$8!0R&qQammEJSl(R-gT zaB?@+-rQQfv9|rr>dMyI=KA~_s~hib>?m(C{MRgu-g<9qXG0~zr)Jqmh7(q?l+OP@ z;`aLX`i{y391@r+8{V>t#jz8Z-@d)Ru5#f6i?aFHTWdROtJ@nNtXrmY_lXp{|FiXb zF!Ie^{vHc|nnA_bb=%gi)z0i{RaJ^cwM{l`f3(=c+SW$+V6NDXA-1BLN`V!dWLwMp zppTYL>>cX?SnW2?+_&P zpHCk?`N(&uokWtk$scG^NY#{0>474HOilTe9Vo^yR#O4x28ulLH3bR=iUJBXRi)xU zF^=(?nx)b}F@cGiTA<0Bh{Kom<48A^%_7@WK8sva1xO$eTLML~B~TJu0+VdhK9x}( zjMJCajPS2|{&fIaEpb>`CR*l+mWL(^QS(GmS3^!GO%z0&WC~L!BzdKRNf!{cHh(sfLku5*I=u>>Q+A!~TXj{G&Toj7e-gx-8((@cEA3*GP~M8&>gC*t`3f=sw&de=-%n@pQLe)6=-*l zL9g#B`#0=YSN^bHyRK5bYM9W6BQH9DBduK!VbetsK3x(KP(uW$DWXaXB4%k(!~!kB z$Au1EMj978bOl*l>d;l>(CE-L1ZZ~XI*M56&@xI`?2yJJURdhT3S3;4eq3CUa$Q`N zVq9F4Vmw?In}=nwdC+3>up&=MVf1Lo`(Kw{8u1fFC>Std@mq}e$V_=!9&#dqy1;?M#KA)mGHYtx#YWWn9?RbV2JURa)0}jPort>X50J0DQ#AG=N>8F zD{^B|_OvL%sUqU}r5tzmbizLt7Gs>S?-*X64Z_c--FA%K9bPv_+k<^(T?(gJX)BY>4G#Cf3})1tk_v99m1?H6-FbVkv)wt^-MKmF^t&r}cOG;Px`&Xp z=@mP8S1r7CZ||M%!6D>}FYdknpxcE)?Zw0{cYL^axVzKuz7NItz0P50=V0%Ht{O`` z{)7MJ@!#st_P9{rzGRsd1XM7FDw?QSsbJsf+sO)Q9=uK;%||WVki-cq9sK6p+DIFi z@PO$ojylHI0E{2$)R2h{%f2#HUiwvs<2$Yw^H~Wk4?KO~DTAH^ny1kNg*|3R68c%9 z97!b45*6>)f1m#Kk(FRmIOPJcsuZSi+6A4>NTG%`7cAzZR2VWBY*v@TWxVVHvRNt2 z;hYN&o0mcZ8!oh%BLx>-7uMMVma*Cfpk<~FIxW}+ilc&sA`~4ZcpR!;=+U zpri-Ko__o(Tg2+k(dZK$7rpP`_~__}HL-+E{$5FVmV{?Xc-q3#=AJs*!qN^cv4|5y zNM#yP)i|Lqj)+h;HZ*)C_@z3fszWXKc5%b!4F8`+B9f3Sj7UPVNXk1g(c~;rQb=Q( zv#_L)!A!^^E0rAPIE%a#3RvJQic%`?Vx8bf%YlSD1oStMcjVZxm`i#X4#yJF_CG?C7&$#eR2+suf)G;9}6QO1B z&59j7TQJsYL

L{EQ1+w#C6Gtzp9y$d%=(G`FV%Au$97zQWJ+&0#5a*d2>`!WBs zHO9!SPG)t_mS(`s>OG{KXNS#2(WzOTo7KlB&9bVP!L#Kb6m$GlYs`b&*Pc_*dL^b& zuzG!=r zjcX1%URVwdPwBG;A#Zqa>#TkpifD+UbBbAP3R;_a-(fVFXe_xN6;fCYY!OY?yol)v zRnZbzE_s@)q6Lb=(Ht5#!>C8y%+IyNt7wT{I-mVXWkN9wl9tJ5`P?VCzcC-IZf3Sz q*1{IMiojM-VXL%j^}+#GC{p_2PhFLdr-$Jgw#pBqxWSzpPyYj>1*#VS diff --git a/src/wasm/Hacl_Hash_Blake2b_Simd256.wasm b/src/wasm/Hacl_Hash_Blake2b_Simd256.wasm index 2724451529574975575f03f42e29974896d5d9f5..d144e065bcafff0663829218e54e65f227f2eb9a 100644 GIT binary patch delta 697 zcmZva&1(};6vgkGkIc+_FUjPyV-$L)0b>^m7Mn^|eO(Ar{ROz>socqrF=A1jTy{ymXAsP-C0I(-_ zCljbn^DSmhH;(LGPFQ1SLZht5h0NyiexX<@S27RM85@H-$Oz~2JrZ0O%%e{CMt21K zxNaMvGq}|sbwTPYM#IP=jcVQ?>i4%c!Jki+d$Tj_jG&Mnp#eqx-B@5H!k|%Zqy~M_ z%#|zNwP+Nzhuzy9lktZy(=2+++^z4XNe$zUGgfc{Aas-)y6E~&ZKjp0n2!9~84tZR zSOGv9^g`EMr5jkcR%b5sNV)wOy+!!Pr1$*tF)!o@p{Rp;#A4qDbQFYkm_ZveiLfqY zd|5+4h6b9LMa$3X>%13;8d?g@(PfR%ni(Y8eolYo>nx|6Vw*kJ^RnW-NY_RMxZW2n z%B0?c&udy1gRvP}#I%}J%cGZa3eA;!{xGzH78h%9+eg1+kM&zgF1)<9n3w= eul1u|XX$EtX}Nid{w=)`O!d3D$H|Z28T$pe!HD$$ delta 367 zcmbPi(PhfdkXW3{$iTqBIH9qjfkAX4ztzToV~py(4L}|bCl@ylFCV{vppdW#TRTXI zv7W*4BO5pSmL?#FQ*H7FCaKB%%xshEnb?`QRbgEA_?*Pz5(b{h>zS08_~k*u>_w@? zsU-{oKp|kdADFp@vMg=AXW(8ga76G2grrg<#T$2}ai!t&}-ofn+Bw2X6 zm?kk!p1>o*c@yM(CZ;;}$!mG6*c3UH85~zk?&lHRe1i826C?NJy#f~)xi|L-Ze|8@ zoJ8*fIZ9$@7#a6YelMQObeRQY$mAjk38gEnSpuvK3S5p1iX4iZ3LFZYW(*A6+&|ht g!NZ`;!2NOZ8i^T`&q=Of0r`1yz0}0bRZ=q<0iKCtHUIzs diff --git a/src/wasm/Hacl_Hash_Blake2s.wasm b/src/wasm/Hacl_Hash_Blake2s.wasm index 0dcaff92fc840a8a399b6410fb14041d83f27baf..f9d6889edb710d8b1845bb009261b1fd960d68cb 100644 GIT binary patch delta 2058 zcmaJ>%WoTH5dZe!WWBq#*Y?_8Z~U0C>)LJN=F!$|leEnjL6nL^#T5=wL{kyh1*sc^ zKmrE}syKkuu2hK&f&<4MP*ene0umQa9MfBma6v+ilwQ0Y*U6p?km>x zm!Fbi7B{!w8f|hba<%Nl}z}L>Q{25 zmox6{1Uq{o?@@}U3OBs2`F`O)kZ<1E*%8I?u7++Yd1HHTdn?%ea7RBfd&}Pv>)lxkjg;ngGlK+suY;ODM-LnkAjnsl!E>MMcqO} zTcbbXHGM6Lh9qhFpbtsH9S0plu*1VvVhBkzL7)Xf>Z5Xfv`IOt-NzIu_6tv1{_*_B zzdmwI%s_@)c49*ovK^sgE;i&L-w_77v7rEkj~$g>-1*iSW->bJ&8G1a7s(Kne+ES*RnSv5}dGgu!BkSc)RkBawj? zU{5$CPV|HeEj&rf(xyh*QoZ(KtIY9^bbtb+H{izT8!l2DZ7LB-I-Y`4yd0^Y*YKiH@j4zd#u0|W6=}?;CYGaui zi^9Su^=6N5Aeyv9rZnO$C0ifNKHo!WL<>pU(bF+`H_Y%`a9>TrG!IPCk1o*SC($A- zo3L!@#aM!|>^_k1XxT|S!mcmds#7^S4Y4c6eG)|hP-L%aL_R?7`OIM4;eGfzt6tQV45GeWW-HX}jPwzdQXudbY$}+8VCTr*Nj$0@`@h(Ap%H)ij>%Ym`^IzN-1YTRa`UFdm!^Cod0fj)&)Gy~*IE z*UrvZA$+I_x0rl!csU$Q?!S4a@zgIr(AVeRY4y_l?`lD_(>^fZkN^$Rpxc>H)NhH)LG_7V{dXavj^J2B9NX23iV#6SyL0jr2Ml%axq!(pq> zbR3}13><2GW`Pge)bnA7dOify^P!DB+VG{=g^HXFW)BS5l`xr1L z#pfwLPw{z*&x`n=2y3XeCUkU6C$)clpwx!&*`p`dbh05*I|au7t8K0{VzsS!*J((T z+L;*0LYCCFV;~1PQac|54mhN?y9B1900mOJxJ2+Alt^ta2Fg$-wO3rOhzgjHA$3f! zNl_E>q^Jolsck`#dKP%pL-na=kv$8wHz6Zuw@B?xrS=viNoIya#FtcjNyV2^d@043 zQhce1FXWASzlwT;b^;)cJNk)as0<)B0vKIU601GNp28kTJ`xaq)o`Dw!aDsN&VF>A z*-#Y%xoXH&lU~flh^saN>`5;Q2UZKN8gbQ-%bDBFZxTEme&70tr{|aLH#97M+WQ2H z54&Ge@qDjuWPAx?8@9u5k2mID^#0_@9~WWlgkK*0F%>yTOMMxyJQi^X9W3Ib5ZkC+ zWUDUft1cR=E?TQuwK1#H=>EVsM`3kW!|MGPR+ZHX?!{5|ILaPJ+2cg^fU9hEDLpz? zm_`E!OrwP{!QeHZC}(sM8o)7k&xsw#QK}dY(ecSCzMSIAE55wq%PYRT;E33 zgl&_|cvqmzJ4XQoGBUm?`MGGKyG_aNE>B8;72bCcEF4vnLUHWg3k2RSc(t^oO^)C! 
zu?uO@xoXiK9+)mtcBLL4m^M{3KO*i#6<(-}w$-Hr(?xpc7+3D&%Ka-Vcd3e3?nTvd xVE_O#9aY!V*72dx7_+Qu9dF%iH diff --git a/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm b/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm index 6d5cdb7834f987142bb56a6a2aec4f62a473dfd3..fd3c1b86281cdfbf43173dd093717f4f2d448d64 100644 GIT binary patch delta 746 zcmZvaKWG#|7{$NYKexLxvv+&{;(0+blYnPolmv-#Hkm*K5nD@-Ll3#lo)Pa-DCnLd z5mE%=NK{aXg=Gpou(LN>Sc$Dwx@cu7_{{}#1nlO`y#3Ah-t6kh_0u_Mb~^+B$g>Ci zK8$VT>b9Mx?-oCk+Ik;%!hkX!W^+-#P%M=*kIf!QLG5Ok_w6AHs`K`^*S^{A0X?o8 zmh?I+Nv{oDU$tshC~&J9bec)Bd<~)rqx{>gZmS1*b0jP%=%3akDI!K#rJ6D5%XYR@ z4z4$Q%|^FPDIz-_5Yv3LefusT?n*#NO#E}ca z8@UF|Ici6|DnJln30s81i9&tGJ5QLt<&7Q5y6V@T55Mm*S2)I`U|YCRR=@JrCSF=Y zoQpVHI{2}JgRs}8Y0OOZ1;370DG|}C{t;Btkx61QNih*s{>6ht3SvQHlT!H!He=K@ zKpiWfio}v0x`-2(For#NbKsk*w$DO@VHvtOqPeOZpuwPLcr5TxRrcBZADbIC{|{RY z*8DFP;ra^OBz&;P?${`9+ksv`J@*M(~~>yunaS!k57janKIbkDz-sj p3fQiGT6i8UB+aGPY_~Bpd+wC^5%orSMSm=g>xJUxpi_E5{sJ0zkAnaJ delta 398 zcmXw#&npE{6vxlKcjnEHJ7eaV8H#Wp@it_qP-^}I|G{g@(8Jrjc_|9R0x7~RVIdo3 zBQ&zJRZ=!ASjgH+Z=s}=dtcd|&$sXScFuQuaIN2gD5)R-KszgG3c;>3ay5R3id(55 z%P^^BH#u&;+4FPjQbNlR)^k+5UKL>U@k`vs9g^eASi`jUAGOkKl+?iD7q}ZcT~gMn zaT3?S6;iv{&`@4Z#zc847S86*hG()G$KVwkF>|EJ_l=3D7L}6nLQIf(bS!V;2jnDs zQv%}BHU_iZq3A&Q#atGx}!N9-4MlU}^{j zQc%RQG6XcfW(e3DM~Due4BA0 zBgbS1?oW*DlM9&VGO|v-$Rh@1zu-v_XO(hf&|qM2Fk$r39@ Kj?Gtvlb8T7_*s4c delta 274 zcmdn4vt4I`6fa{vV|@a1eLaX`oG2?fF-dV^V%NkI!Yqbn3``U6D>5=q+#|`An3I{F z7jI_5z%rSUQGtN_Gi41Id3Na&tYWFe8u?CJYjqC2Ynz qg)vKjeXeZ@wy=!~_6exkRb} diff --git a/src/wasm/INFO.txt b/src/wasm/INFO.txt index d2f1192f..63672310 100644 --- a/src/wasm/INFO.txt +++ b/src/wasm/INFO.txt @@ -1,4 +1,4 @@ This code was generated with the following toolchain. -F* version: 6e23042e74555544267731295b7d382c86edc574 -Karamel version: a7be2a7c43eca637ceb57fe8f3ffd16fc6627ebd +F* version: 2fd9303b78e9161bc7cc487ab1b99e5b516138ad +Karamel version: abb38e1d6fbbb2e09603394a0ce2eebe2d536b67 Vale version: 0.3.19 diff --git a/src/wasm/layouts.json b/src/wasm/layouts.json index 81273a66..d62bb7ad 100644 --- a/src/wasm/layouts.json +++ b/src/wasm/layouts.json @@ -1 +1 @@ 
-{"Spec_Hash_Definitions_hash_alg":["LEnum"],"Prims_string":["LBuiltin",["I32"],["A32"]],"Prims_int":["LBuiltin",["I32"],["A32"]],"K___uint32_t_uint32_t":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Int",["A32"]]]]]}],"__bool_bool_bool_bool":["LFlat",{"size":4,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]],["thd",[2,["Int",["A8"]]]],["f3",[3,["Int",["A8"]]]]]}],"__bool_bool":["LFlat",{"size":2,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]]]}],"Hacl_Streaming_Types_error_code":["LEnum"],"Hacl_MAC_Poly1305_state_t":["LFlat",{"size":20,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]],["p_key",[16,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Streaming_MD_state_64":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Streaming_MD_state_32":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A32"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Hash_SHA3_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_SHA3_hash_buf"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"hash_buf2":["LFlat",{"size":16,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA3_hash_buf"]]],["snd",[8,["Layout","Hacl_Hash_SHA3_hash_buf"]]]]}],"Hacl_Hash_SHA3_hash_buf":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Hash_Blake2s_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2s_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2s_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A32"]]]]],["snd",[4,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Hash_Blake2s_Simd128_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2s_Simd128_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2s_Simd128_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Hash_Blake2b_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2b_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2b_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A64"]]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Hash_Blake2b_Simd256_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2b_Simd256_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2b_Simd256_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Hash_SHA2_uint8_8p":["LFlat",{"size":56,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_7p"]]]]}],"Hacl_Hash_SHA2_uint8_7p":["LFlat",{"size":48,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_6p"]]]]}],"Hacl_Hash_SHA2_uint8_6p":["LFlat",{"size":40,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_5p"]]]]}],"Hacl_Hash_SHA2_uint8_5p":["LFlat",{"size":32,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd"
,[8,["Layout","Hacl_Hash_SHA2_uint8_4p"]]]]}],"Hacl_Hash_SHA2_uint8_4p":["LFlat",{"size":24,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_3p"]]]]}],"Hacl_Hash_SHA2_uint8_3p":["LFlat",{"size":16,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_2p"]]]]}],"Hacl_Hash_SHA2_uint8_2x8p":["LFlat",{"size":112,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA2_uint8_8p"]]],["snd",[56,["Layout","Hacl_Hash_SHA2_uint8_8p"]]]]}],"Hacl_Hash_SHA2_uint8_2x4p":["LFlat",{"size":48,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA2_uint8_4p"]]],["snd",[24,["Layout","Hacl_Hash_SHA2_uint8_4p"]]]]}],"Hacl_Hash_SHA2_uint8_2p":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[4,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Impl_HPKE_context_s":["LFlat",{"size":16,"fields":[["ctx_key",[0,["Pointer",["Int",["A8"]]]]],["ctx_nonce",[4,["Pointer",["Int",["A8"]]]]],["ctx_seq",[8,["Pointer",["Int",["A64"]]]]],["ctx_exporter",[12,["Pointer",["Int",["A8"]]]]]]}],"Hacl_HMAC_DRBG_state":["LFlat",{"size":12,"fields":[["k",[0,["Pointer",["Int",["A8"]]]]],["v",[4,["Pointer",["Int",["A8"]]]]],["reseed_counter",[8,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64":["LFlat",{"size":20,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A64"]]]]],["mu",[8,["Int",["A64"]]]],["r2",[16,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32":["LFlat",{"size":16,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A32"]]]]],["mu",[8,["Int",["A32"]]]],["r2",[12,["Pointer",["Int",["A32"]]]]]]}],"FStar_UInt128_uint128":["LFlat",{"size":16,"fields":[["low",[0,["Int",["A64"]]]],["high",[8,["Int",["A64"]]]]]}],"EverCrypt_Hash_Incremental_state_t":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Layout","EverCrypt_Hash_state_s"]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"state_s_tags":["LEnum"],"EverCrypt_Hash_state_s":["LFlat",{"size":12,"fields":[["tag",[0,["Int",["A32"]]]],["val",[8,["Union",[["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A32"]]],["Pointer",["Unknown"]],["Pointer",["Int",["A64"]]],["Pointer",["Unknown"]]]]]]]}],"EverCrypt_Error_error_code":["LEnum"],"C_String_t_":["LBuiltin",["I32"],["A32"]],"C_String_t":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t_":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t":["LBuiltin",["I32"],["A32"]],"exit_code":["LBuiltin",["I32"],["A32"]],"clock_t":["LBuiltin",["I32"],["A32"]]} \ No newline at end of file 
+{"Spec_Hash_Definitions_hash_alg":["LEnum"],"Prims_string":["LBuiltin",["I32"],["A32"]],"Prims_int":["LBuiltin",["I32"],["A32"]],"K___uint32_t_uint32_t":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Int",["A32"]]]]]}],"__bool_bool_bool_bool":["LFlat",{"size":4,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]],["thd",[2,["Int",["A8"]]]],["f3",[3,["Int",["A8"]]]]]}],"__bool_bool":["LFlat",{"size":2,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]]]}],"Hacl_Streaming_Types_error_code":["LEnum"],"Hacl_MAC_Poly1305_state_t":["LFlat",{"size":20,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]],["p_key",[16,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Streaming_MD_state_64":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Streaming_MD_state_32":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A32"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Hash_SHA3_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_SHA3_hash_buf"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"hash_buf2":["LFlat",{"size":16,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA3_hash_buf"]]],["snd",[8,["Layout","Hacl_Hash_SHA3_hash_buf"]]]]}],"Hacl_Hash_SHA3_hash_buf":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Hash_Blake2s_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2s_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2s_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A32"]]]]],["snd",[4,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Hash_Blake2s_Simd128_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2s_Simd128_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2s_Simd128_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Hash_Blake2b_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2b_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2b_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A64"]]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Hash_Blake2b_Simd256_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2b_Simd256_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2b_Simd256_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Hash_SHA2_uint8_8p":["LFlat",{"size":56,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_7p"]]]]}],"Hacl_Hash_SHA2_uint8_7p":["LFlat",{"size":48,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_6p"]]]]}],"Hacl_Hash_SHA2_uint8_6p":["LFlat",{"size":40,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_5p"]]]]}],"Hacl_Hash_SHA2_uint8_5p":["LFlat",{"size":32,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd"
,[8,["Layout","Hacl_Hash_SHA2_uint8_4p"]]]]}],"Hacl_Hash_SHA2_uint8_4p":["LFlat",{"size":24,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_3p"]]]]}],"Hacl_Hash_SHA2_uint8_3p":["LFlat",{"size":16,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_2p"]]]]}],"Hacl_Hash_SHA2_uint8_2x8p":["LFlat",{"size":112,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA2_uint8_8p"]]],["snd",[56,["Layout","Hacl_Hash_SHA2_uint8_8p"]]]]}],"Hacl_Hash_SHA2_uint8_2x4p":["LFlat",{"size":48,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA2_uint8_4p"]]],["snd",[24,["Layout","Hacl_Hash_SHA2_uint8_4p"]]]]}],"Hacl_Hash_SHA2_uint8_2p":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[4,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Impl_HPKE_context_s":["LFlat",{"size":16,"fields":[["ctx_key",[0,["Pointer",["Int",["A8"]]]]],["ctx_nonce",[4,["Pointer",["Int",["A8"]]]]],["ctx_seq",[8,["Pointer",["Int",["A64"]]]]],["ctx_exporter",[12,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Hash_Blake2s_blake2s_params":["LFlat",{"size":24,"fields":[["digest_length",[0,["Int",["A8"]]]],["key_length",[1,["Int",["A8"]]]],["fanout",[2,["Int",["A8"]]]],["depth",[3,["Int",["A8"]]]],["leaf_length",[4,["Int",["A32"]]]],["node_offset",[8,["Int",["A32"]]]],["xof_length",[12,["Int",["A16"]]]],["node_depth",[14,["Int",["A8"]]]],["inner_length",[15,["Int",["A8"]]]],["salt",[16,["Pointer",["Int",["A8"]]]]],["personal",[20,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Hash_Blake2s_blake2b_params":["LFlat",{"size":28,"fields":[["digest_length1",[0,["Int",["A8"]]]],["key_length1",[1,["Int",["A8"]]]],["fanout1",[2,["Int",["A8"]]]],["depth1",[3,["Int",["A8"]]]],["leaf_length1",[4,["Int",["A32"]]]],["node_offset1",[8,["Int",["A32"]]]],["xof_length1",[12,["Int",["A32"]]]],["node_depth1",[16,["Int",["A8"]]]],["inner_length1",[17,["Int",["A8"]]]],["salt1",[20,["Pointer",["Int",["A8"]]]]],["personal1",[24,["Pointer",["Int",["A8"]]]]]]}],"Hacl_HMAC_DRBG_state":["LFlat",{"size":12,"fields":[["k",[0,["Pointer",["Int",["A8"]]]]],["v",[4,["Pointer",["Int",["A8"]]]]],["reseed_counter",[8,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64":["LFlat",{"size":20,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A64"]]]]],["mu",[8,["Int",["A64"]]]],["r2",[16,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32":["LFlat",{"size":16,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A32"]]]]],["mu",[8,["Int",["A32"]]]],["r2",[12,["Pointer",["Int",["A32"]]]]]]}],"FStar_UInt128_uint128":["LFlat",{"size":16,"fields":[["low",[0,["Int",["A64"]]]],["high",[8,["Int",["A64"]]]]]}],"EverCrypt_Hash_Incremental_state_t":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Layout","EverCrypt_Hash_state_s"]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"state_s_tags":["LEnum"],"EverCrypt_Hash_state_s":["LFlat",{"size":12,"fields":[["tag",[0,["Int",["A32"]]]],["val",[8,["Union",[["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A32"]]],["Pointer",["Unknown"]],["Pointer",["Int",["A64"]]],["Pointer",["Unknown"]]]]]]]}],"EverCrypt_Error_error_code":["LEnum"],"C_String_t_":["LBuiltin",["I32"],["A32"]],"C_String_t":["LBuiltin",["I32"],["A32"]],"C_Comp
at_String_t_":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t":["LBuiltin",["I32"],["A32"]],"exit_code":["LBuiltin",["I32"],["A32"]],"clock_t":["LBuiltin",["I32"],["A32"]]} \ No newline at end of file From 495cb2fbf835b1b30cbabbd49b98dc1a2a789059 Mon Sep 17 00:00:00 2001 From: Franziskus Kiefer Date: Thu, 25 Apr 2024 16:15:14 +0200 Subject: [PATCH 02/10] update to hacl-star/hacl-star#940 --- include/Hacl_Hash_SHA3_Simd256.h | 24 +------ include/Hacl_SHA2_Types.h | 25 +++++-- include/Hacl_SHA2_Vec128.h | 2 +- include/Hacl_SHA2_Vec256.h | 2 +- include/internal/Hacl_SHA2_Types.h | 6 +- ocaml/ctypes.depend | 10 +-- ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml | 32 --------- ocaml/lib/Hacl_SHA2_Types_bindings.ml | 53 +++++++++------ src/Hacl_Hash_SHA3_Simd256.c | 68 ++++++++++---------- src/Hacl_SHA2_Vec128.c | 60 +++++++---------- src/Hacl_SHA2_Vec256.c | 60 +++++++---------- tests/sha3.cc | 16 +++-- 12 files changed, 158 insertions(+), 200 deletions(-) diff --git a/include/Hacl_Hash_SHA3_Simd256.h b/include/Hacl_Hash_SHA3_Simd256.h index fc2b03b7..617e8e34 100644 --- a/include/Hacl_Hash_SHA3_Simd256.h +++ b/include/Hacl_Hash_SHA3_Simd256.h @@ -35,33 +35,13 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" +#include "Hacl_SHA2_Types.h" #include "libintvector.h" -typedef struct K____uint8_t___uint8_t__s -{ - uint8_t *fst; - uint8_t *snd; -} -K____uint8_t___uint8_t_; - -typedef struct K____uint8_t__K____uint8_t___uint8_t__s -{ - uint8_t *fst; - K____uint8_t___uint8_t_ snd; -} -K____uint8_t__K____uint8_t___uint8_t_; - -typedef struct K____uint8_t___uint8_t____K____uint8_t___uint8_t__s -{ - uint8_t *fst; - K____uint8_t__K____uint8_t___uint8_t_ snd; -} -K____uint8_t___uint8_t____K____uint8_t___uint8_t_; - void Hacl_Hash_SHA3_Simd256_absorb_inner_256( uint32_t rateInBytes, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *s ); diff --git a/include/Hacl_SHA2_Types.h b/include/Hacl_SHA2_Types.h index d4260d77..da2a6886 100644 --- a/include/Hacl_SHA2_Types.h +++ b/include/Hacl_SHA2_Types.h @@ -35,13 +35,30 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" -#include "Hacl_Hash_SHA3_Simd256.h" +typedef struct Hacl_Hash_SHA2_uint8_2p_s +{ + uint8_t *fst; + uint8_t *snd; +} +Hacl_Hash_SHA2_uint8_2p; + +typedef struct Hacl_Hash_SHA2_uint8_3p_s +{ + uint8_t *fst; + Hacl_Hash_SHA2_uint8_2p snd; +} +Hacl_Hash_SHA2_uint8_3p; -typedef K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_2p; +typedef struct Hacl_Hash_SHA2_uint8_4p_s +{ + uint8_t *fst; + Hacl_Hash_SHA2_uint8_3p snd; +} +Hacl_Hash_SHA2_uint8_4p; -typedef K____uint8_t__K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_3p; +typedef uint8_t *Hacl_Hash_SHA2_bufx1; -typedef K____uint8_t___uint8_t____K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_4p; +typedef Hacl_Hash_SHA2_uint8_4p Hacl_Hash_SHA2_bufx4; #if defined(__cplusplus) } diff --git a/include/Hacl_SHA2_Vec128.h b/include/Hacl_SHA2_Vec128.h index fa6aa99b..c5df2075 100644 --- a/include/Hacl_SHA2_Vec128.h +++ b/include/Hacl_SHA2_Vec128.h @@ -35,7 +35,7 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" -#include "Hacl_Hash_SHA3_Simd256.h" +#include "Hacl_SHA2_Types.h" void Hacl_SHA2_Vec128_sha224_4( diff --git a/include/Hacl_SHA2_Vec256.h b/include/Hacl_SHA2_Vec256.h index 734c6ddd..7e41314a 100644 --- a/include/Hacl_SHA2_Vec256.h +++ b/include/Hacl_SHA2_Vec256.h @@ -35,8 +35,8 @@ extern "C" { #include "krml/lowstar_endianness.h" #include 
"krml/internal/target.h" +#include "Hacl_SHA2_Types.h" #include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA3_Simd256.h" void Hacl_SHA2_Vec256_sha224_8( diff --git a/include/internal/Hacl_SHA2_Types.h b/include/internal/Hacl_SHA2_Types.h index 3f07c80f..dcb276aa 100644 --- a/include/internal/Hacl_SHA2_Types.h +++ b/include/internal/Hacl_SHA2_Types.h @@ -40,7 +40,7 @@ extern "C" { typedef struct Hacl_Hash_SHA2_uint8_5p_s { uint8_t *fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd; + Hacl_Hash_SHA2_uint8_4p snd; } Hacl_Hash_SHA2_uint8_5p; @@ -67,8 +67,8 @@ Hacl_Hash_SHA2_uint8_8p; typedef struct Hacl_Hash_SHA2_uint8_2x4p_s { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd; + Hacl_Hash_SHA2_uint8_4p fst; + Hacl_Hash_SHA2_uint8_4p snd; } Hacl_Hash_SHA2_uint8_2x4p; diff --git a/ocaml/ctypes.depend b/ocaml/ctypes.depend index 8da61ba0..79cea4b2 100644 --- a/ocaml/ctypes.depend +++ b/ocaml/ctypes.depend @@ -1,4 +1,4 @@ -CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx 
lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx 
lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx +CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx 
lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx lib/Hacl_Streaming_Types_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmo: lib_gen/Hacl_Streaming_Types_gen.cmx: lib/Hacl_Streaming_Types_bindings.cmx @@ -51,6 +51,10 @@ lib/Hacl_Hash_SHA3_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_ lib/Hacl_Hash_SHA3_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo 
lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_SHA3_gen.cmx: lib/Hacl_Hash_SHA3_bindings.cmx lib_gen/Hacl_Hash_SHA3_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_SHA3_bindings.cmx lib_gen/Hacl_Hash_SHA3_gen.cmx +lib/Hacl_SHA2_Types_bindings.cmx: +lib/Hacl_SHA2_Types_bindings.cmo: +lib_gen/Hacl_SHA2_Types_gen.cmx: lib/Hacl_SHA2_Types_bindings.cmx +lib_gen/Hacl_SHA2_Types_gen.exe: lib/Hacl_SHA2_Types_bindings.cmx lib_gen/Hacl_SHA2_Types_gen.cmx lib/Hacl_Hash_SHA3_Simd256_bindings.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmo: lib_gen/Hacl_Hash_SHA3_Simd256_gen.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx @@ -59,10 +63,6 @@ lib/Hacl_Hash_MD5_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_S lib/Hacl_Hash_MD5_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Hash_MD5_gen.cmx: lib/Hacl_Hash_MD5_bindings.cmx lib_gen/Hacl_Hash_MD5_gen.exe: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_c_stubs.o lib/Hacl_Hash_MD5_bindings.cmx lib_gen/Hacl_Hash_MD5_gen.cmx -lib/Hacl_SHA2_Types_bindings.cmx: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx -lib/Hacl_SHA2_Types_bindings.cmo: lib/Hacl_Hash_SHA3_Simd256_bindings.cmo lib/Hacl_Hash_SHA3_Simd256_stubs.cmo -lib_gen/Hacl_SHA2_Types_gen.cmx: lib/Hacl_SHA2_Types_bindings.cmx -lib_gen/Hacl_SHA2_Types_gen.exe: lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_c_stubs.o lib/Hacl_SHA2_Types_bindings.cmx lib_gen/Hacl_SHA2_Types_gen.cmx lib/EverCrypt_Error_bindings.cmx: lib/EverCrypt_Error_bindings.cmo: lib_gen/EverCrypt_Error_gen.cmx: lib/EverCrypt_Error_bindings.cmx diff --git a/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml b/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml index c5e160c6..a04d1206 100644 --- a/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml +++ b/ocaml/lib/Hacl_Hash_SHA3_Simd256_bindings.ml @@ -2,38 +2,6 @@ open Ctypes module Bindings(F:Cstubs.FOREIGN) = struct open F - type k____uint8_t___uint8_t_ = [ `k____uint8_t___uint8_t_ ] structure - let (k____uint8_t___uint8_t_ : - [ `k____uint8_t___uint8_t_ ] structure typ) = - structure "K____uint8_t___uint8_t__s" - let k____uint8_t___uint8_t__fst = - field k____uint8_t___uint8_t_ "fst" (ptr uint8_t) - let k____uint8_t___uint8_t__snd = - field k____uint8_t___uint8_t_ "snd" (ptr uint8_t) - let _ = seal k____uint8_t___uint8_t_ - type k____uint8_t__K____uint8_t___uint8_t_ = - [ `k____uint8_t__K____uint8_t___uint8_t_ ] structure - let (k____uint8_t__K____uint8_t___uint8_t_ : - [ `k____uint8_t__K____uint8_t___uint8_t_ ] structure typ) = - structure "K____uint8_t__K____uint8_t___uint8_t__s" - let k____uint8_t__K____uint8_t___uint8_t__fst = - field k____uint8_t__K____uint8_t___uint8_t_ "fst" (ptr uint8_t) - let k____uint8_t__K____uint8_t___uint8_t__snd = - field k____uint8_t__K____uint8_t___uint8_t_ "snd" - k____uint8_t___uint8_t_ - let _ = seal k____uint8_t__K____uint8_t___uint8_t_ - type k____uint8_t___uint8_t____K____uint8_t___uint8_t_ = - [ `k____uint8_t___uint8_t____K____uint8_t___uint8_t_ ] structure - let (k____uint8_t___uint8_t____K____uint8_t___uint8_t_ : - [ `k____uint8_t___uint8_t____K____uint8_t___uint8_t_ ] structure typ) = - structure "K____uint8_t___uint8_t____K____uint8_t___uint8_t__s" - let k____uint8_t___uint8_t____K____uint8_t___uint8_t__fst = - field k____uint8_t___uint8_t____K____uint8_t___uint8_t_ "fst" - (ptr 
uint8_t) - let k____uint8_t___uint8_t____K____uint8_t___uint8_t__snd = - field k____uint8_t___uint8_t____K____uint8_t___uint8_t_ "snd" - k____uint8_t__K____uint8_t___uint8_t_ - let _ = seal k____uint8_t___uint8_t____K____uint8_t___uint8_t_ let hacl_Hash_SHA3_Simd256_shake128 = foreign "Hacl_Hash_SHA3_Simd256_shake128" (ocaml_bytes @-> diff --git a/ocaml/lib/Hacl_SHA2_Types_bindings.ml b/ocaml/lib/Hacl_SHA2_Types_bindings.ml index d2b67650..56fcc212 100644 --- a/ocaml/lib/Hacl_SHA2_Types_bindings.ml +++ b/ocaml/lib/Hacl_SHA2_Types_bindings.ml @@ -2,20 +2,33 @@ open Ctypes module Bindings(F:Cstubs.FOREIGN) = struct open F - module Hacl_Hash_SHA3_Simd256_applied = - (Hacl_Hash_SHA3_Simd256_bindings.Bindings)(Hacl_Hash_SHA3_Simd256_stubs) - open Hacl_Hash_SHA3_Simd256_applied - type hacl_Hash_SHA2_uint8_2p = k____uint8_t___uint8_t_ - let hacl_Hash_SHA2_uint8_2p = - typedef k____uint8_t___uint8_t_ "Hacl_Hash_SHA2_uint8_2p" - type hacl_Hash_SHA2_uint8_3p = k____uint8_t__K____uint8_t___uint8_t_ - let hacl_Hash_SHA2_uint8_3p = - typedef k____uint8_t__K____uint8_t___uint8_t_ "Hacl_Hash_SHA2_uint8_3p" - type hacl_Hash_SHA2_uint8_4p = - k____uint8_t___uint8_t____K____uint8_t___uint8_t_ - let hacl_Hash_SHA2_uint8_4p = - typedef k____uint8_t___uint8_t____K____uint8_t___uint8_t_ - "Hacl_Hash_SHA2_uint8_4p" + type hacl_Hash_SHA2_uint8_2p = [ `hacl_Hash_SHA2_uint8_2p ] structure + let (hacl_Hash_SHA2_uint8_2p : + [ `hacl_Hash_SHA2_uint8_2p ] structure typ) = + structure "Hacl_Hash_SHA2_uint8_2p_s" + let hacl_Hash_SHA2_uint8_2p_fst = + field hacl_Hash_SHA2_uint8_2p "fst" (ptr uint8_t) + let hacl_Hash_SHA2_uint8_2p_snd = + field hacl_Hash_SHA2_uint8_2p "snd" (ptr uint8_t) + let _ = seal hacl_Hash_SHA2_uint8_2p + type hacl_Hash_SHA2_uint8_3p = [ `hacl_Hash_SHA2_uint8_3p ] structure + let (hacl_Hash_SHA2_uint8_3p : + [ `hacl_Hash_SHA2_uint8_3p ] structure typ) = + structure "Hacl_Hash_SHA2_uint8_3p_s" + let hacl_Hash_SHA2_uint8_3p_fst = + field hacl_Hash_SHA2_uint8_3p "fst" (ptr uint8_t) + let hacl_Hash_SHA2_uint8_3p_snd = + field hacl_Hash_SHA2_uint8_3p "snd" hacl_Hash_SHA2_uint8_2p + let _ = seal hacl_Hash_SHA2_uint8_3p + type hacl_Hash_SHA2_uint8_4p = [ `hacl_Hash_SHA2_uint8_4p ] structure + let (hacl_Hash_SHA2_uint8_4p : + [ `hacl_Hash_SHA2_uint8_4p ] structure typ) = + structure "Hacl_Hash_SHA2_uint8_4p_s" + let hacl_Hash_SHA2_uint8_4p_fst = + field hacl_Hash_SHA2_uint8_4p "fst" (ptr uint8_t) + let hacl_Hash_SHA2_uint8_4p_snd = + field hacl_Hash_SHA2_uint8_4p "snd" hacl_Hash_SHA2_uint8_3p + let _ = seal hacl_Hash_SHA2_uint8_4p type hacl_Hash_SHA2_uint8_5p = [ `hacl_Hash_SHA2_uint8_5p ] structure let (hacl_Hash_SHA2_uint8_5p : [ `hacl_Hash_SHA2_uint8_5p ] structure typ) = @@ -23,8 +36,7 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_SHA2_uint8_5p_fst = field hacl_Hash_SHA2_uint8_5p "fst" (ptr uint8_t) let hacl_Hash_SHA2_uint8_5p_snd = - field hacl_Hash_SHA2_uint8_5p "snd" - k____uint8_t___uint8_t____K____uint8_t___uint8_t_ + field hacl_Hash_SHA2_uint8_5p "snd" hacl_Hash_SHA2_uint8_4p let _ = seal hacl_Hash_SHA2_uint8_5p type hacl_Hash_SHA2_uint8_6p = [ `hacl_Hash_SHA2_uint8_6p ] structure let (hacl_Hash_SHA2_uint8_6p : @@ -58,11 +70,9 @@ module Bindings(F:Cstubs.FOREIGN) = [ `hacl_Hash_SHA2_uint8_2x4p ] structure typ) = structure "Hacl_Hash_SHA2_uint8_2x4p_s" let hacl_Hash_SHA2_uint8_2x4p_fst = - field hacl_Hash_SHA2_uint8_2x4p "fst" - k____uint8_t___uint8_t____K____uint8_t___uint8_t_ + field hacl_Hash_SHA2_uint8_2x4p "fst" hacl_Hash_SHA2_uint8_4p let hacl_Hash_SHA2_uint8_2x4p_snd = - field 
hacl_Hash_SHA2_uint8_2x4p "snd" - k____uint8_t___uint8_t____K____uint8_t___uint8_t_ + field hacl_Hash_SHA2_uint8_2x4p "snd" hacl_Hash_SHA2_uint8_4p let _ = seal hacl_Hash_SHA2_uint8_2x4p type hacl_Hash_SHA2_uint8_2x8p = [ `hacl_Hash_SHA2_uint8_2x8p ] structure let (hacl_Hash_SHA2_uint8_2x8p : @@ -73,4 +83,7 @@ module Bindings(F:Cstubs.FOREIGN) = let hacl_Hash_SHA2_uint8_2x8p_snd = field hacl_Hash_SHA2_uint8_2x8p "snd" hacl_Hash_SHA2_uint8_8p let _ = seal hacl_Hash_SHA2_uint8_2x8p + type hacl_Hash_SHA2_bufx4 = hacl_Hash_SHA2_uint8_4p + let hacl_Hash_SHA2_bufx4 = + typedef hacl_Hash_SHA2_uint8_4p "Hacl_Hash_SHA2_bufx4" end \ No newline at end of file diff --git a/src/Hacl_Hash_SHA3_Simd256.c b/src/Hacl_Hash_SHA3_Simd256.c index 5dfbf960..131c34e6 100644 --- a/src/Hacl_Hash_SHA3_Simd256.c +++ b/src/Hacl_Hash_SHA3_Simd256.c @@ -30,7 +30,7 @@ void Hacl_Hash_SHA3_Simd256_absorb_inner_256( uint32_t rateInBytes, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *s ) { @@ -411,9 +411,9 @@ Hacl_Hash_SHA3_Simd256_shake128( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 168U; @@ -423,7 +423,7 @@ Hacl_Hash_SHA3_Simd256_shake128( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b3 = ib.snd.snd.snd; uint8_t *b2 = ib.snd.snd.fst; @@ -443,7 +443,7 @@ Hacl_Hash_SHA3_Simd256_shake128( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -735,7 +735,7 @@ Hacl_Hash_SHA3_Simd256_shake128( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b25 = b.snd.snd.fst; @@ -1340,9 +1340,9 @@ Hacl_Hash_SHA3_Simd256_shake256( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 136U; @@ -1352,7 +1352,7 @@ Hacl_Hash_SHA3_Simd256_shake256( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b3 = ib.snd.snd.snd; uint8_t *b2 = ib.snd.snd.fst; @@ 
-1372,7 +1372,7 @@ Hacl_Hash_SHA3_Simd256_shake256( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -1664,7 +1664,7 @@ Hacl_Hash_SHA3_Simd256_shake256( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b25 = b.snd.snd.fst; @@ -2268,9 +2268,9 @@ Hacl_Hash_SHA3_Simd256_sha3_224( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 144U; @@ -2280,7 +2280,7 @@ Hacl_Hash_SHA3_Simd256_sha3_224( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b3 = ib.snd.snd.snd; uint8_t *b2 = ib.snd.snd.fst; @@ -2300,7 +2300,7 @@ Hacl_Hash_SHA3_Simd256_sha3_224( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -2592,7 +2592,7 @@ Hacl_Hash_SHA3_Simd256_sha3_224( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b25 = b.snd.snd.fst; @@ -3196,9 +3196,9 @@ Hacl_Hash_SHA3_Simd256_sha3_256( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 136U; @@ -3208,7 +3208,7 @@ Hacl_Hash_SHA3_Simd256_sha3_256( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b3 = ib.snd.snd.snd; uint8_t *b2 = ib.snd.snd.fst; @@ -3228,7 +3228,7 @@ Hacl_Hash_SHA3_Simd256_sha3_256( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = 
inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -3520,7 +3520,7 @@ Hacl_Hash_SHA3_Simd256_sha3_256( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b25 = b.snd.snd.fst; @@ -4124,9 +4124,9 @@ Hacl_Hash_SHA3_Simd256_sha3_384( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 104U; @@ -4136,7 +4136,7 @@ Hacl_Hash_SHA3_Simd256_sha3_384( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b3 = ib.snd.snd.snd; uint8_t *b2 = ib.snd.snd.fst; @@ -4156,7 +4156,7 @@ Hacl_Hash_SHA3_Simd256_sha3_384( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -4448,7 +4448,7 @@ Hacl_Hash_SHA3_Simd256_sha3_384( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b25 = b.snd.snd.fst; @@ -5052,9 +5052,9 @@ Hacl_Hash_SHA3_Simd256_sha3_512( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 72U; @@ -5064,7 +5064,7 @@ Hacl_Hash_SHA3_Simd256_sha3_512( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b3 = ib.snd.snd.snd; uint8_t *b2 = ib.snd.snd.fst; @@ -5084,7 +5084,7 @@ Hacl_Hash_SHA3_Simd256_sha3_512( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -5376,7 +5376,7 @@ Hacl_Hash_SHA3_Simd256_sha3_512( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = 
b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b25 = b.snd.snd.fst; @@ -6016,7 +6016,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint8_t *b0 = input0; uint8_t *b1 = input1; @@ -6064,7 +6064,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % 168U; uint8_t *b01 = input0; @@ -6356,7 +6356,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b2 = b.snd.snd.fst; diff --git a/src/Hacl_SHA2_Vec128.c b/src/Hacl_SHA2_Vec128.c index 18f9a73a..02af75b1 100644 --- a/src/Hacl_SHA2_Vec128.c +++ b/src/Hacl_SHA2_Vec128.c @@ -42,10 +42,7 @@ static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha224_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec128 *hash -) +sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -298,7 +295,7 @@ sha224_update4( static inline void sha224_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -313,7 +310,7 @@ sha224_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update4(mb, st); } @@ -323,7 +320,7 @@ static inline void sha224_update_last4( uint64_t totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -377,13 +374,13 @@ sha224_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha224_update4(last0, hash); if (blocks > 1U) { @@ -393,10 +390,7 @@ sha224_update_last4( } static inline void -sha224_finish4( - Lib_IntVector_Intrinsics_vec128 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) 
+sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -491,9 +485,9 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha224_init4(st); @@ -509,7 +503,7 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update_last4(len_, rem, lb, st); sha224_finish4(st, rb); @@ -528,10 +522,7 @@ static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha256_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec128 *hash -) +sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -784,7 +775,7 @@ sha256_update4( static inline void sha256_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -799,7 +790,7 @@ sha256_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update4(mb, st); } @@ -809,7 +800,7 @@ static inline void sha256_update_last4( uint64_t totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -863,13 +854,13 @@ sha256_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha256_update4(last0, hash); if (blocks > 1U) { @@ -879,10 +870,7 @@ sha256_update_last4( } static inline void -sha256_finish4( - Lib_IntVector_Intrinsics_vec128 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -977,9 +965,9 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = 
input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha256_init4(st); @@ -995,7 +983,7 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update_last4(len_, rem, lb, st); sha256_finish4(st, rb); diff --git a/src/Hacl_SHA2_Vec256.c b/src/Hacl_SHA2_Vec256.c index 4098d4c7..c34767f5 100644 --- a/src/Hacl_SHA2_Vec256.c +++ b/src/Hacl_SHA2_Vec256.c @@ -1541,10 +1541,7 @@ static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha384_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec256 *hash -) +sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -1781,7 +1778,7 @@ sha384_update4( static inline void sha384_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -1796,7 +1793,7 @@ sha384_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update4(mb, st); } @@ -1806,7 +1803,7 @@ static inline void sha384_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -1860,13 +1857,13 @@ sha384_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha384_update4(last0, hash); if (blocks > 1U) { @@ -1876,10 +1873,7 @@ sha384_update_last4( } static inline void -sha384_finish4( - Lib_IntVector_Intrinsics_vec256 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -1966,9 +1960,9 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + 
Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha384_init4(st); @@ -1984,7 +1978,7 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update_last4(len_, rem, lb, st); sha384_finish4(st, rb); @@ -2003,10 +1997,7 @@ static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha512_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec256 *hash -) +sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -2243,7 +2234,7 @@ sha512_update4( static inline void sha512_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -2258,7 +2249,7 @@ sha512_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update4(mb, st); } @@ -2268,7 +2259,7 @@ static inline void sha512_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -2322,13 +2313,13 @@ sha512_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha512_update4(last0, hash); if (blocks > 1U) { @@ -2338,10 +2329,7 @@ sha512_update_last4( } static inline void -sha512_finish4( - Lib_IntVector_Intrinsics_vec256 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -2428,9 +2416,9 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha512_init4(st); @@ -2446,7 +2434,7 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *bl1 = b1 + 
input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update_last4(len_, rem, lb, st); sha512_finish4(st, rb); diff --git a/tests/sha3.cc b/tests/sha3.cc index 7f9aeabe..ed380df9 100644 --- a/tests/sha3.cc +++ b/tests/sha3.cc @@ -149,8 +149,8 @@ TEST(ApiSuite, ApiTest) uint32_t digest_size = 42; uint8_t digest[42]; - Hacl_Hash_SHA3_shake128_hacl( - message_size, (uint8_t*)message, digest_size, digest); + Hacl_Hash_SHA3_shake128( + digest, digest_size, (uint8_t*)message, message_size); // ANCHOR_END(example shake128) bytes expected_digest = @@ -201,16 +201,20 @@ TEST_P(ShakeKAT, TryKAT) if (test_case.md.size() == 128 / 8) { bytes digest(test_case.md.size(), 128 / 8); - Hacl_Hash_SHA3_shake128_hacl( - test_case.msg.size(), test_case.msg.data(), digest.size(), digest.data()); + Hacl_Hash_SHA3_shake128(digest.data(), + digest.size(), + test_case.msg.data(), + test_case.msg.size()); EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl << bytes_to_hex(digest) << std::endl; } else if (test_case.md.size() == 256 / 8) { bytes digest(test_case.md.size(), 256 / 8); - Hacl_Hash_SHA3_shake256_hacl( - test_case.msg.size(), test_case.msg.data(), digest.size(), digest.data()); + Hacl_Hash_SHA3_shake256(digest.data(), + digest.size(), + test_case.msg.data(), + test_case.msg.size()); EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl << bytes_to_hex(digest) << std::endl; From cbb80943e7ccf65b8ad2440becbc214805e2efe1 Mon Sep 17 00:00:00 2001 From: Franziskus Kiefer Date: Thu, 25 Apr 2024 18:18:01 +0200 Subject: [PATCH 03/10] fixup benchmarks --- benchmarks/sha3.cc | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/benchmarks/sha3.cc b/benchmarks/sha3.cc index de744d8f..79b7c31b 100644 --- a/benchmarks/sha3.cc +++ b/benchmarks/sha3.cc @@ -27,7 +27,8 @@ static void Hacl_Sha3_224(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_224(digest224.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_224( + digest224.data(), (uint8_t*)input.data(), input.size()); } if (digest224 != expected_digest_sha3_224) { state.SkipWithError("Incorrect digest."); @@ -51,7 +52,8 @@ static void Hacl_Sha3_256(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_256(digest256.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_256( + digest256.data(), (uint8_t*)input.data(), input.size()); } if (digest256 != expected_digest_sha3_256) { state.SkipWithError("Incorrect digest."); @@ -102,7 +104,8 @@ static void Hacl_Sha3_384(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_384(digest384.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_384( + digest384.data(), (uint8_t*)input.data(), input.size()); } if (digest384 != expected_digest_sha3_384) { state.SkipWithError("Incorrect digest."); @@ -126,7 +129,8 @@ static void Hacl_Sha3_512(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_512(digest512.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_512( + digest512.data(), (uint8_t*)input.data(), input.size()); } if (digest512 != expected_digest_sha3_512) { state.SkipWithError("Incorrect digest."); @@ -242,8 +246,10 @@ static void Hacl_Sha3_shake128(benchmark::State& state) { for (auto _ : state) { - 
Hacl_Hash_SHA3_shake128_hacl( - input.size(), (uint8_t*)input.data(), digest_shake.size(), digest_shake.data()); + Hacl_Hash_SHA3_shake128(digest_shake.data(), + digest_shake.size(), + (uint8_t*)input.data(), + input.size()); } } @@ -253,8 +259,10 @@ static void Hacl_Sha3_shake256(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_shake256_hacl( - input.size(), (uint8_t*)input.data(), digest_shake.size(), digest_shake.data()); + Hacl_Hash_SHA3_shake256(digest_shake.data(), + digest_shake.size(), + (uint8_t*)input.data(), + input.size()); } } From d316abe73cb1cc5b95db84f3f691dd6fb0032055 Mon Sep 17 00:00:00 2001 From: Franziskus Kiefer Date: Mon, 29 Apr 2024 09:17:09 +0200 Subject: [PATCH 04/10] wip: still not updated upstream --- karamel/krmllib/dist/minimal/fstar_uint128_msvc.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h b/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h index cd1448dd..6ff658f5 100644 --- a/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h +++ b/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h @@ -217,7 +217,7 @@ static const uint32_t FStar_UInt128_u32_64 = (uint32_t)64U; inline static uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s) { - return (hi << s) + (lo >> FStar_UInt128_u32_64 - s); + return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s)); } inline static uint64_t @@ -241,7 +241,7 @@ inline static FStar_UInt128_uint128 FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s) { FStar_UInt128_uint128 lit; lit.low = (uint64_t)0U; - lit.high = a.low << s - FStar_UInt128_u32_64; + lit.high = a.low << (s - FStar_UInt128_u32_64); return lit; } @@ -267,7 +267,7 @@ FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s) { inline static uint64_t FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s) { - return (lo >> s) + (hi << FStar_UInt128_u32_64 - s); + return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s)); } inline static uint64_t @@ -290,7 +290,7 @@ FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s) { inline static FStar_UInt128_uint128 FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s) { FStar_UInt128_uint128 lit; - lit.low = a.high >> s - FStar_UInt128_u32_64; + lit.low = a.high >> (s - FStar_UInt128_u32_64); lit.high = (uint64_t)0U; return lit; } @@ -488,7 +488,7 @@ FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y) { u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_), w3); lit.high = x_ * (y >> FStar_UInt128_u32_32) + (t_ >> FStar_UInt128_u32_32) + - (u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_) >> + ((u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_)) >> FStar_UInt128_u32_32); return lit; } From e284aaedf8c26616b23c6645b94b6b2e6c0e761e Mon Sep 17 00:00:00 2001 From: Franziskus Kiefer Date: Wed, 8 May 2024 14:25:59 +0200 Subject: [PATCH 05/10] update to 9109b711e7d24e9e0f35967180025597a23fcb43 --- include/msvc/Hacl_Hash_Blake2b.h | 122 +- include/msvc/Hacl_Hash_Blake2b_Simd256.h | 69 +- include/msvc/Hacl_Hash_Blake2s.h | 65 +- include/msvc/Hacl_Hash_Blake2s_Simd128.h | 71 +- include/msvc/Hacl_Hash_SHA3.h | 2 + include/msvc/Hacl_Hash_SHA3_Simd256.h | 27 +- include/msvc/Hacl_SHA2_Types.h | 25 +- include/msvc/Hacl_SHA2_Vec128.h | 2 +- include/msvc/Hacl_SHA2_Vec256.h | 2 +- include/msvc/internal/Hacl_Hash_Blake2b.h | 21 +- .../internal/Hacl_Impl_Blake2_Constants.h | 6 +- include/msvc/internal/Hacl_SHA2_Types.h | 6 +- 
ocaml/ctypes.depend | 10 +- ocaml/hacl-star/Hacl.ml | 4 +- rust/hacl-sys/src/bindings/bindings.rs | 369 +- rust/src/digest.rs | 16 +- rust/src/hazmat/sha3.rs | 20 +- src/msvc/Hacl_Hash_Blake2b.c | 538 +- src/msvc/Hacl_Hash_Blake2b_Simd256.c | 502 +- src/msvc/Hacl_Hash_Blake2s.c | 502 +- src/msvc/Hacl_Hash_Blake2s_Simd128.c | 500 +- src/msvc/Hacl_Hash_SHA3.c | 3795 ++--- src/msvc/Hacl_Hash_SHA3_Simd256.c | 12953 +++++----------- src/msvc/Hacl_SHA2_Vec128.c | 60 +- src/msvc/Hacl_SHA2_Vec256.c | 60 +- src/wasm/EverCrypt_Hash.wasm | Bin 48469 -> 58084 bytes src/wasm/Hacl_AEAD_Chacha20Poly1305.wasm | Bin 7653 -> 7653 bytes .../Hacl_AEAD_Chacha20Poly1305_Simd128.wasm | Bin 1910 -> 1910 bytes src/wasm/Hacl_Bignum25519_51.wasm | Bin 90858 -> 90858 bytes src/wasm/Hacl_Bignum256_32.wasm | Bin 32207 -> 32207 bytes src/wasm/Hacl_Curve25519_51.wasm | Bin 7166 -> 7166 bytes src/wasm/Hacl_HMAC.wasm | Bin 28160 -> 28160 bytes src/wasm/Hacl_HMAC_Blake2b_256.wasm | Bin 1510 -> 1510 bytes src/wasm/Hacl_HMAC_Blake2s_128.wasm | Bin 1508 -> 1508 bytes src/wasm/Hacl_Hash_Blake2b.wasm | Bin 16141 -> 22942 bytes src/wasm/Hacl_Hash_Blake2b_Simd256.wasm | Bin 7187 -> 11362 bytes src/wasm/Hacl_Hash_Blake2s.wasm | Bin 14331 -> 21136 bytes src/wasm/Hacl_Hash_Blake2s_Simd128.wasm | Bin 6030 -> 10194 bytes src/wasm/Hacl_Hash_SHA3.wasm | Bin 17565 -> 55480 bytes src/wasm/Hacl_Hash_SHA3_Simd256.wasm | Bin 0 -> 6567 bytes src/wasm/Hacl_Impl_Blake2_Constants.wasm | Bin 1517 -> 1517 bytes src/wasm/Hacl_K256_ECDSA.wasm | Bin 98188 -> 98133 bytes src/wasm/Hacl_MAC_Poly1305.wasm | Bin 9539 -> 9539 bytes ..._Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm | Bin 1993 -> 1993 bytes src/wasm/INFO.txt | 4 +- src/wasm/layouts.json | 2 +- src/wasm/main.html | 2 +- src/wasm/shell.js | 2 +- 48 files changed, 7677 insertions(+), 12080 deletions(-) create mode 100644 src/wasm/Hacl_Hash_SHA3_Simd256.wasm diff --git a/include/msvc/Hacl_Hash_Blake2b.h b/include/msvc/Hacl_Hash_Blake2b.h index 414574f9..3403fc83 100644 --- a/include/msvc/Hacl_Hash_Blake2b.h +++ b/include/msvc/Hacl_Hash_Blake2b.h @@ -38,11 +38,34 @@ extern "C" { #include "Hacl_Streaming_Types.h" #include "Hacl_Krmllib.h" -typedef struct Hacl_Hash_Blake2b_block_state_t_s +typedef struct Hacl_Hash_Blake2b_blake2_params_s +{ + uint8_t digest_length; + uint8_t key_length; + uint8_t fanout; + uint8_t depth; + uint32_t leaf_length; + uint64_t node_offset; + uint8_t node_depth; + uint8_t inner_length; + uint8_t *salt; + uint8_t *personal; +} +Hacl_Hash_Blake2b_blake2_params; + +typedef struct K____uint64_t___uint64_t__s { uint64_t *fst; uint64_t *snd; } +K____uint64_t___uint64_t_; + +typedef struct Hacl_Hash_Blake2b_block_state_t_s +{ + uint8_t fst; + uint8_t snd; + K____uint64_t___uint64_t_ thd; +} Hacl_Hash_Blake2b_block_state_t; typedef struct Hacl_Hash_Blake2b_state_t_s @@ -54,23 +77,90 @@ typedef struct Hacl_Hash_Blake2b_state_t_s Hacl_Hash_Blake2b_state_t; /** - State allocation function when there is no key + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. 
+- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B. + +*/ +Hacl_Hash_Blake2b_state_t +*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k); + +/** + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. + +*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t kk); + +/** + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. */ Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void); /** - Re-initialization function when there is no key + General-purpose re-initialization function with parameters and +key. You cannot change digest_length or key_length, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. +*/ +void +Hacl_Hash_Blake2b_reset_with_key_and_params( + Hacl_Hash_Blake2b_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +); + +/** + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. */ -void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state); +void Hacl_Hash_Blake2b_reset_with_key(Hacl_Hash_Blake2b_state_t *s, uint8_t *k); /** - Update function when there is no key; 0 = success, 1 = max length exceeded + Specialized-purpose re-initialization function with no parameters +and no key. This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. +*/ +void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *s); + +/** + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len); /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your +parameters. 
*/ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output); @@ -79,6 +169,11 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) */ void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state); +/** + Copying. This preserves all parameters. +*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_copy(Hacl_Hash_Blake2b_state_t *state); + /** Write the BLAKE2b digest of message `input` using key `key` into `output`. @@ -99,6 +194,21 @@ Hacl_Hash_Blake2b_hash_with_key( uint32_t key_len ); +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ +void +Hacl_Hash_Blake2b_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +); + #if defined(__cplusplus) } #endif diff --git a/include/msvc/Hacl_Hash_Blake2b_Simd256.h b/include/msvc/Hacl_Hash_Blake2b_Simd256.h index adddce66..af309dc8 100644 --- a/include/msvc/Hacl_Hash_Blake2b_Simd256.h +++ b/include/msvc/Hacl_Hash_Blake2b_Simd256.h @@ -37,13 +37,22 @@ extern "C" { #include "Hacl_Streaming_Types.h" #include "Hacl_Krmllib.h" +#include "Hacl_Hash_Blake2b.h" #include "libintvector.h" -typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s +typedef struct K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256__s { Lib_IntVector_Intrinsics_vec256 *fst; Lib_IntVector_Intrinsics_vec256 *snd; } +K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_; + +typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s +{ + uint8_t fst; + uint8_t snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ thd; +} Hacl_Hash_Blake2b_Simd256_block_state_t; typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s @@ -54,15 +63,56 @@ typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s } Hacl_Hash_Blake2b_Simd256_state_t; +/** + State allocation function when there are parameters and a key. The +length of the key k MUST match the value of the field key_length in the +parameters. Furthermore, there is a static (not dynamically checked) requirement +that key_length does not exceed max_key (32 for S, 64 for B). +*/ +Hacl_Hash_Blake2b_Simd256_state_t +*Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +); + +/** + State allocation function when there is just a custom key. All +other parameters are set to their respective default values, meaning the output +length is the maximum allowed output (32 for S, 64 for B). +*/ +Hacl_Hash_Blake2b_Simd256_state_t +*Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk); + /** State allocation function when there is no key */ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void); +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( + Hacl_Hash_Blake2b_Simd256_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +); +
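Usage sketch for the parameterized allocator above (illustrative, not part of the upstream header; the function and field names come from the declarations in this patch, while the local names and the 16-byte salt/personal sizes, i.e. the standard BLAKE2b parameter-block sizes, are assumptions):

#include "Hacl_Hash_Blake2b_Simd256.h"

/* Allocate a state that produces 24-byte digests under a 16-byte key.
   fanout = 1 and depth = 1 select plain sequential hashing. */
static Hacl_Hash_Blake2b_Simd256_state_t *
example_malloc_with_params(uint8_t key[16], uint8_t salt[16], uint8_t personal[16])
{
  Hacl_Hash_Blake2b_blake2_params p = {
    .digest_length = 24U, .key_length = 16U, .fanout = 1U, .depth = 1U,
    .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U,
    .salt = salt, .personal = personal
  };
  /* digest_length and key_length are now fixed for this state; later resets
     must go through reset_with_key_and_params with the same two values. */
  return Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(&p, key);
}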
+/** + Re-initialization function when there is a key. Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be the same key size that was originally passed to +`malloc_with_key` +*/ +void +Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k); + /** Re-initialization function when there is no key */ -void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state); +void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s); /** Update function when there is no key; 0 = success, 1 = max length exceeded @@ -85,6 +135,12 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 */ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state); +/** + Copying. The key length (or absence thereof) must match between source and destination. +*/ +Hacl_Hash_Blake2b_Simd256_state_t +*Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state); + /** Write the BLAKE2b digest of message `input` using key `key` into `output`. @@ -105,6 +161,15 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( uint32_t key_len ); +void +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +); + #if defined(__cplusplus) } #endif diff --git a/include/msvc/Hacl_Hash_Blake2s.h b/include/msvc/Hacl_Hash_Blake2s.h index 2c0d7c5b..ac783473 100644 --- a/include/msvc/Hacl_Hash_Blake2s.h +++ b/include/msvc/Hacl_Hash_Blake2s.h @@ -36,12 +36,21 @@ extern "C" { #include "krml/internal/target.h" #include "Hacl_Streaming_Types.h" +#include "Hacl_Hash_Blake2b.h" -typedef struct Hacl_Hash_Blake2s_block_state_t_s +typedef struct K____uint32_t___uint32_t__s { uint32_t *fst; uint32_t *snd; } +K____uint32_t___uint32_t_; + +typedef struct Hacl_Hash_Blake2s_block_state_t_s +{ + uint8_t fst; + uint8_t snd; + K____uint32_t___uint32_t_ thd; +} Hacl_Hash_Blake2s_block_state_t; typedef struct Hacl_Hash_Blake2s_state_t_s @@ -52,15 +61,51 @@ typedef struct Hacl_Hash_Blake2s_state_t_s } Hacl_Hash_Blake2s_state_t; +/** + State allocation function when there are parameters and a key. The +length of the key k MUST match the value of the field key_length in the +parameters. Furthermore, there is a static (not dynamically checked) requirement +that key_length does not exceed max_key (32 for S, 64 for B). +*/ +Hacl_Hash_Blake2s_state_t +*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k); + +/** + State allocation function when there is just a custom key. All +other parameters are set to their respective default values, meaning the output +length is the maximum allowed output (32 for S, 64 for B). +*/ +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk); + /** State allocation function when there is no key */ Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void); +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2s_reset_with_key_and_params( + Hacl_Hash_Blake2s_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +); +
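Usage sketch for the keyed streaming API (illustrative, not part of the upstream header; update, digest and free are declared just below, and the 32-byte key/tag sizes follow from the S-variant defaults described above):

#include "Hacl_Hash_Blake2s.h"

/* MAC-style use: the 32-byte key is committed at allocation time. */
static void
example_keyed_blake2s(uint8_t key[32], uint8_t *msg, uint32_t msg_len, uint8_t tag[32])
{
  Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc_with_key(key, 32U);
  Hacl_Streaming_Types_error_code rc = Hacl_Hash_Blake2s_update(st, msg, msg_len);
  if (rc == 0) /* 0 = success, 1 = max length exceeded */
    Hacl_Hash_Blake2s_digest(st, tag); /* default digest length: 32 bytes for S */
  Hacl_Hash_Blake2s_free(st);
}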
+/** + Re-initialization function when there is a key. Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be the same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k); + /** Re-initialization function when there is no key */ -void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state); +void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s); /** Update function when there is no key; 0 = success, 1 = max length exceeded @@ -78,11 +123,16 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) */ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state); +/** + Copying. The key length (or absence thereof) must match between source and destination. +*/ +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state); + /** Write the BLAKE2s digest of message `input` using key `key` into `output`. @param output Pointer to `output_len` bytes of memory where the digest is written to. @param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32. @param input Pointer to `input_len` bytes of memory where the input message is read from. @param input_len Length of the input message. @param key Pointer to `key_len` bytes of memory where the key is read from. @@ -98,6 +148,15 @@ Hacl_Hash_Blake2s_hash_with_key( uint32_t key_len ); +void +Hacl_Hash_Blake2s_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +); + #if defined(__cplusplus) } #endif diff --git a/include/msvc/Hacl_Hash_Blake2s_Simd128.h b/include/msvc/Hacl_Hash_Blake2s_Simd128.h index 6484005e..d725ee86 100644 --- a/include/msvc/Hacl_Hash_Blake2s_Simd128.h +++ b/include/msvc/Hacl_Hash_Blake2s_Simd128.h @@ -36,13 +36,22 @@ extern "C" { #include "krml/internal/target.h" #include "Hacl_Streaming_Types.h" +#include "Hacl_Hash_Blake2b.h" #include "libintvector.h" -typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s +typedef struct K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128__s { Lib_IntVector_Intrinsics_vec128 *fst; Lib_IntVector_Intrinsics_vec128 *snd; } +K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_; + +typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s +{ + uint8_t fst; + uint8_t snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ thd; +} Hacl_Hash_Blake2s_Simd128_block_state_t; typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s @@ -53,15 +62,56 @@ typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s } Hacl_Hash_Blake2s_Simd128_state_t; +/** + State allocation function when there are parameters and a key. The +length of the key k MUST match the value of the field key_length in the +parameters. Furthermore, there is a static (not dynamically checked) requirement +that key_length does not exceed max_key (32 for S, 64 for B). +*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +); + +/** + State allocation function when there is just a custom key. All +other parameters are set to their respective default values, meaning the output +length is the maximum allowed output (32 for S, 64 for B). +*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk); +
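The one-shot Hacl_Hash_Blake2s_hash_with_key above wraps the whole allocate/update/digest/free lifecycle in a single call; a minimal sketch (illustrative names; passing a null key pointer together with key_len = 0 for unkeyed hashing is an assumption based on the "Can be 0" remark):

#include <stddef.h>
#include "Hacl_Hash_Blake2s.h"

/* Unkeyed one-shot BLAKE2s with the maximum 32-byte digest. */
static void
example_oneshot_blake2s(uint8_t *msg, uint32_t msg_len, uint8_t digest[32])
{
  /* output_len must satisfy 1 <= output_len <= 32 for the S variant. */
  Hacl_Hash_Blake2s_hash_with_key(digest, 32U, msg, msg_len, NULL, 0U);
}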
/** State allocation function when there is no key */ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void); +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( + Hacl_Hash_Blake2s_Simd128_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +); + +/** + Re-initialization function when there is a key. Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be the same key size that was originally passed to +`malloc_with_key` +*/ +void +Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k); + /** Re-initialization function when there is no key */ -void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state); +void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s); /** Update function when there is no key; 0 = success, 1 = max length exceeded @@ -84,11 +134,17 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 */ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state); +/** + Copying. The key length (or absence thereof) must match between source and destination. +*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state); + /** Write the BLAKE2s digest of message `input` using key `key` into `output`. @param output Pointer to `output_len` bytes of memory where the digest is written to. @param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32. @param input Pointer to `input_len` bytes of memory where the input message is read from. @param input_len Length of the input message. @param key Pointer to `key_len` bytes of memory where the key is read from.
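The SIMD-128 variant follows the same malloc/update/digest/reset/free lifecycle; a minimal unkeyed sketch against the declarations in this header (illustrative names; that digest leaves the state valid for a subsequent reset is an assumption consistent with the reset/copy API above):

#include "Hacl_Hash_Blake2s_Simd128.h"

/* Streaming hash with the default 32-byte digest. */
static void
example_blake2s_simd128(uint8_t *msg, uint32_t msg_len, uint8_t digest[32])
{
  Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
  Hacl_Hash_Blake2s_Simd128_update(st, msg, msg_len); /* 0 = success */
  Hacl_Hash_Blake2s_Simd128_digest(st, digest);
  Hacl_Hash_Blake2s_Simd128_reset(st); /* ready for a fresh message */
  Hacl_Hash_Blake2s_Simd128_free(st);
}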
@@ -104,6 +160,15 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( uint32_t key_len ); +void +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +); + #if defined(__cplusplus) } #endif diff --git a/include/msvc/Hacl_Hash_SHA3.h b/include/msvc/Hacl_Hash_SHA3.h index 4b69f35a..8fb78fcd 100644 --- a/include/msvc/Hacl_Hash_SHA3.h +++ b/include/msvc/Hacl_Hash_SHA3.h @@ -77,6 +77,8 @@ uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s); bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s); +void Hacl_Hash_SHA3_absorb_inner_32(uint32_t rateInBytes, uint8_t *b, uint64_t *s); + void Hacl_Hash_SHA3_shake128( uint8_t *output, diff --git a/include/msvc/Hacl_Hash_SHA3_Simd256.h b/include/msvc/Hacl_Hash_SHA3_Simd256.h index f38bf7cb..617e8e34 100644 --- a/include/msvc/Hacl_Hash_SHA3_Simd256.h +++ b/include/msvc/Hacl_Hash_SHA3_Simd256.h @@ -35,28 +35,15 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" +#include "Hacl_SHA2_Types.h" #include "libintvector.h" -typedef struct K____uint8_t___uint8_t__s -{ - uint8_t *fst; - uint8_t *snd; -} -K____uint8_t___uint8_t_; - -typedef struct K____uint8_t__K____uint8_t___uint8_t__s -{ - uint8_t *fst; - K____uint8_t___uint8_t_ snd; -} -K____uint8_t__K____uint8_t___uint8_t_; - -typedef struct K____uint8_t___uint8_t____K____uint8_t___uint8_t__s -{ - uint8_t *fst; - K____uint8_t__K____uint8_t___uint8_t_ snd; -} -K____uint8_t___uint8_t____K____uint8_t___uint8_t_; +void +Hacl_Hash_SHA3_Simd256_absorb_inner_256( + uint32_t rateInBytes, + Hacl_Hash_SHA2_uint8_4p b, + Lib_IntVector_Intrinsics_vec256 *s +); void Hacl_Hash_SHA3_Simd256_shake128( diff --git a/include/msvc/Hacl_SHA2_Types.h b/include/msvc/Hacl_SHA2_Types.h index d4260d77..da2a6886 100644 --- a/include/msvc/Hacl_SHA2_Types.h +++ b/include/msvc/Hacl_SHA2_Types.h @@ -35,13 +35,30 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" -#include "Hacl_Hash_SHA3_Simd256.h" +typedef struct Hacl_Hash_SHA2_uint8_2p_s +{ + uint8_t *fst; + uint8_t *snd; +} +Hacl_Hash_SHA2_uint8_2p; + +typedef struct Hacl_Hash_SHA2_uint8_3p_s +{ + uint8_t *fst; + Hacl_Hash_SHA2_uint8_2p snd; +} +Hacl_Hash_SHA2_uint8_3p; -typedef K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_2p; +typedef struct Hacl_Hash_SHA2_uint8_4p_s +{ + uint8_t *fst; + Hacl_Hash_SHA2_uint8_3p snd; +} +Hacl_Hash_SHA2_uint8_4p; -typedef K____uint8_t__K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_3p; +typedef uint8_t *Hacl_Hash_SHA2_bufx1; -typedef K____uint8_t___uint8_t____K____uint8_t___uint8_t_ Hacl_Hash_SHA2_uint8_4p; +typedef Hacl_Hash_SHA2_uint8_4p Hacl_Hash_SHA2_bufx4; #if defined(__cplusplus) } diff --git a/include/msvc/Hacl_SHA2_Vec128.h b/include/msvc/Hacl_SHA2_Vec128.h index fa6aa99b..c5df2075 100644 --- a/include/msvc/Hacl_SHA2_Vec128.h +++ b/include/msvc/Hacl_SHA2_Vec128.h @@ -35,7 +35,7 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" -#include "Hacl_Hash_SHA3_Simd256.h" +#include "Hacl_SHA2_Types.h" void Hacl_SHA2_Vec128_sha224_4( diff --git a/include/msvc/Hacl_SHA2_Vec256.h b/include/msvc/Hacl_SHA2_Vec256.h index 734c6ddd..7e41314a 100644 --- a/include/msvc/Hacl_SHA2_Vec256.h +++ b/include/msvc/Hacl_SHA2_Vec256.h @@ -35,8 +35,8 @@ extern "C" { #include "krml/lowstar_endianness.h" #include "krml/internal/target.h" +#include "Hacl_SHA2_Types.h" #include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA3_Simd256.h" void 
Hacl_SHA2_Vec256_sha224_8( diff --git a/include/msvc/internal/Hacl_Hash_Blake2b.h b/include/msvc/internal/Hacl_Hash_Blake2b.h index e2437d97..6928d205 100644 --- a/include/msvc/internal/Hacl_Hash_Blake2b.h +++ b/include/msvc/internal/Hacl_Hash_Blake2b.h @@ -38,20 +38,12 @@ extern "C" { #include "internal/Hacl_Impl_Blake2_Constants.h" #include "../Hacl_Hash_Blake2b.h" -typedef struct Hacl_Hash_Blake2s_blake2_params_s +typedef struct Hacl_Hash_Blake2b_index_s { - uint8_t digest_length; uint8_t key_length; - uint8_t fanout; - uint8_t depth; - uint32_t leaf_length; - uint64_t node_offset; - uint8_t node_depth; - uint8_t inner_length; - uint8_t *salt; - uint8_t *personal; + uint8_t digest_length; } -Hacl_Hash_Blake2s_blake2_params; +Hacl_Hash_Blake2b_index; void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn); @@ -77,6 +69,13 @@ Hacl_Hash_Blake2b_update_last( void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash); +typedef struct K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t__s +{ + Hacl_Hash_Blake2b_blake2_params *fst; + uint8_t *snd; +} +K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_; + #if defined(__cplusplus) } #endif diff --git a/include/msvc/internal/Hacl_Impl_Blake2_Constants.h b/include/msvc/internal/Hacl_Impl_Blake2_Constants.h index aedc2486..fb3a045c 100644 --- a/include/msvc/internal/Hacl_Impl_Blake2_Constants.h +++ b/include/msvc/internal/Hacl_Impl_Blake2_Constants.h @@ -37,7 +37,7 @@ extern "C" { static const uint32_t -Hacl_Hash_Blake2s_sigmaTable[160U] = +Hacl_Hash_Blake2b_sigmaTable[160U] = { 0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U, 14U, 15U, 14U, 10U, 4U, 8U, 9U, 15U, 13U, 6U, 1U, 12U, 0U, 2U, 11U, 7U, 5U, 3U, 11U, 8U, 12U, 0U, 5U, 2U, 15U, 13U, 10U, 14U, 3U, 6U, @@ -51,7 +51,7 @@ Hacl_Hash_Blake2s_sigmaTable[160U] = static const uint32_t -Hacl_Hash_Blake2s_ivTable_S[8U] = +Hacl_Hash_Blake2b_ivTable_S[8U] = { 0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x510E527FU, 0x9B05688CU, 0x1F83D9ABU, 0x5BE0CD19U @@ -59,7 +59,7 @@ Hacl_Hash_Blake2s_ivTable_S[8U] = static const uint64_t -Hacl_Hash_Blake2s_ivTable_B[8U] = +Hacl_Hash_Blake2b_ivTable_B[8U] = { 0x6A09E667F3BCC908ULL, 0xBB67AE8584CAA73BULL, 0x3C6EF372FE94F82BULL, 0xA54FF53A5F1D36F1ULL, 0x510E527FADE682D1ULL, 0x9B05688C2B3E6C1FULL, 0x1F83D9ABFB41BD6BULL, 0x5BE0CD19137E2179ULL diff --git a/include/msvc/internal/Hacl_SHA2_Types.h b/include/msvc/internal/Hacl_SHA2_Types.h index 3f07c80f..dcb276aa 100644 --- a/include/msvc/internal/Hacl_SHA2_Types.h +++ b/include/msvc/internal/Hacl_SHA2_Types.h @@ -40,7 +40,7 @@ extern "C" { typedef struct Hacl_Hash_SHA2_uint8_5p_s { uint8_t *fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd; + Hacl_Hash_SHA2_uint8_4p snd; } Hacl_Hash_SHA2_uint8_5p; @@ -67,8 +67,8 @@ Hacl_Hash_SHA2_uint8_8p; typedef struct Hacl_Hash_SHA2_uint8_2x4p_s { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ snd; + Hacl_Hash_SHA2_uint8_4p fst; + Hacl_Hash_SHA2_uint8_4p snd; } Hacl_Hash_SHA2_uint8_2x4p; diff --git a/ocaml/ctypes.depend b/ocaml/ctypes.depend index 79cea4b2..d94fad90 100644 --- a/ocaml/ctypes.depend +++ b/ocaml/ctypes.depend @@ -1,4 +1,4 @@ -CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx 
lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx 
lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx +CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2b_stubs.cmx lib/Hacl_Hash_Blake2b_bindings.cmx lib/Hacl_Hash_Blake2s_stubs.cmx lib/Hacl_Hash_Blake2s_bindings.cmx lib/Hacl_Hash_Blake2b_Simd256_stubs.cmx lib/Hacl_Hash_Blake2b_Simd256_bindings.cmx lib/Hacl_Hash_Blake2s_Simd128_stubs.cmx lib/Hacl_Hash_Blake2s_Simd128_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx 
lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA3_Simd256_stubs.cmx lib/Hacl_Hash_SHA3_Simd256_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_MAC_Poly1305_stubs.cmx lib/Hacl_MAC_Poly1305_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_bindings.cmx lib/Hacl_MAC_Poly1305_Simd128_stubs.cmx lib/Hacl_MAC_Poly1305_Simd128_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd128_bindings.cmx lib/Hacl_MAC_Poly1305_Simd256_stubs.cmx lib/Hacl_MAC_Poly1305_Simd256_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_stubs.cmx lib/Hacl_AEAD_Chacha20Poly1305_Simd256_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx 
lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx lib/Hacl_Streaming_Types_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmo: lib_gen/Hacl_Streaming_Types_gen.cmx: lib/Hacl_Streaming_Types_bindings.cmx @@ -295,14 +295,14 @@ lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Imp lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx -lib/EverCrypt_Poly1305_bindings.cmx: -lib/EverCrypt_Poly1305_bindings.cmo: -lib_gen/EverCrypt_Poly1305_gen.cmx: lib/EverCrypt_Poly1305_bindings.cmx -lib_gen/EverCrypt_Poly1305_gen.exe: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx: 
lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx +lib/EverCrypt_Poly1305_bindings.cmx: +lib/EverCrypt_Poly1305_bindings.cmo: +lib_gen/EverCrypt_Poly1305_gen.cmx: lib/EverCrypt_Poly1305_bindings.cmx +lib_gen/EverCrypt_Poly1305_gen.exe: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA512_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx diff --git a/ocaml/hacl-star/Hacl.ml b/ocaml/hacl-star/Hacl.ml index 63f713bd..e6bfbd45 100644 --- a/ocaml/hacl-star/Hacl.ml +++ b/ocaml/hacl-star/Hacl.ml @@ -129,11 +129,11 @@ module Keccak = struct let shake128 ~msg ~digest = (* Hacl.SHA3.shake128_hacl *) assert (C.disjoint msg digest); - Hacl_Hash_SHA3.hacl_Hash_SHA3_shake128_hacl (C.size_uint32 msg) (C.ctypes_buf msg) (C.size_uint32 digest) (C.ctypes_buf digest) + Hacl_Hash_SHA3.hacl_Hash_SHA3_shake128 (C.ctypes_buf digest) (C.size_uint32 digest) (C.ctypes_buf msg) (C.size_uint32 msg) let shake256 ~msg ~digest = (* Hacl.SHA3.shake256_hacl *) assert (C.disjoint msg digest); - Hacl_Hash_SHA3.hacl_Hash_SHA3_shake256_hacl (C.size_uint32 msg) (C.ctypes_buf msg) (C.size_uint32 digest) (C.ctypes_buf digest) + Hacl_Hash_SHA3.hacl_Hash_SHA3_shake256 (C.ctypes_buf digest) (C.size_uint32 digest) (C.ctypes_buf msg) (C.size_uint32 msg) let keccak ~rate ~capacity ~suffix ~msg ~digest = (* Hacl.Impl.SHA3.keccak *) assert (rate mod 8 = 0 && rate / 8 > 0 && rate <= 1600); diff --git a/rust/hacl-sys/src/bindings/bindings.rs b/rust/hacl-sys/src/bindings/bindings.rs index fd024e15..c67ce7bc 100644 --- a/rust/hacl-sys/src/bindings/bindings.rs +++ b/rust/hacl-sys/src/bindings/bindings.rs @@ -579,15 +579,15 @@ extern "C" { pub fn Hacl_Hash_SHA2_hash_384(output: *mut u8, input: *mut u8, input_len: u32); } extern "C" { - #[doc = "Compute the public key from the private key.\n\nThe outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32].\nThe argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]."] + #[doc = "Compute the public key from the private key.\n\n@param[out] public_key Points to 32 bytes of valid memory, i.e., `uint8_t[32]`. 
Must not overlap the memory location of `private_key`.\n@param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`."] pub fn Hacl_Ed25519_secret_to_public(public_key: *mut u8, private_key: *mut u8); } extern "C" { - #[doc = "Compute the expanded keys for an Ed25519 signature.\n\nThe outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96].\nThe argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].\n\nIf one needs to sign several messages under the same private key, it is more efficient\nto call `expand_keys` only once and `sign_expanded` multiple times, for each message."] + #[doc = "Compute the expanded keys for an Ed25519 signature.\n\n@param[out] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`. Must not overlap the memory location of `private_key`.\n@param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.\n\nIf one needs to sign several messages under the same private key, it is more efficient\nto call `expand_keys` only once and `sign_expanded` multiple times, for each message."] pub fn Hacl_Ed25519_expand_keys(expanded_keys: *mut u8, private_key: *mut u8); } extern "C" { - #[doc = "Create an Ed25519 signature with the (precomputed) expanded keys.\n\nThe outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].\nThe argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96].\nThe argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].\n\nThe argument `expanded_keys` is obtained through `expand_keys`.\n\nIf one needs to sign several messages under the same private key, it is more efficient\nto call `expand_keys` only once and `sign_expanded` multiple times, for each message."] + #[doc = "Create an Ed25519 signature with the (precomputed) expanded keys.\n\n@param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `expanded_keys` nor `msg`.\n@param[in] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`, containing the expanded keys obtained by invoking `expand_keys`.\n@param[in] msg_len Length of `msg`.\n@param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.\n\nIf one needs to sign several messages under the same private key, it is more efficient\nto call `expand_keys` only once and `sign_expanded` multiple times, for each message."] pub fn Hacl_Ed25519_sign_expanded( signature: *mut u8, expanded_keys: *mut u8, @@ -596,11 +596,11 @@ extern "C" { ); } extern "C" { - #[doc = "Create an Ed25519 signature.\n\nThe outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64].\nThe argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32].\nThe argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].\n\nThe function first calls `expand_keys` and then invokes `sign_expanded`.\n\nIf one needs to sign several messages under the same private key, it is more efficient\nto call `expand_keys` only once and `sign_expanded` multiple times, for each message."] + #[doc = "Create an Ed25519 signature.\n\n@param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. 
Must not overlap the memory locations of `private_key` nor `msg`.\n@param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`.\n@param[in] msg_len Length of `msg`.\n@param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.\n\nThe function first calls `expand_keys` and then invokes `sign_expanded`.\n\nIf one needs to sign several messages under the same private key, it is more efficient\nto call `expand_keys` only once and `sign_expanded` multiple times, for each message."] pub fn Hacl_Ed25519_sign(signature: *mut u8, private_key: *mut u8, msg_len: u32, msg: *mut u8); } extern "C" { - #[doc = "Verify an Ed25519 signature.\n\nThe function returns `true` if the signature is valid and `false` otherwise.\n\nThe argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32].\nThe argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len].\nThe argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]."] + #[doc = "Verify an Ed25519 signature.\n\n@param public_key Points to 32 bytes of valid memory containing the public key, i.e., `uint8_t[32]`.\n@param msg_len Length of `msg`.\n@param msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`.\n@param signature Points to 64 bytes of valid memory containing the signature, i.e., `uint8_t[64]`.\n\n@return Returns `true` if the signature is valid and `false` otherwise."] pub fn Hacl_Ed25519_verify( public_key: *mut u8, msg_len: u32, @@ -663,46 +663,98 @@ extern "C" { } #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct Hacl_Hash_Blake2s_block_state_t_s { - pub fst: *mut u32, - pub snd: *mut u32, +pub struct Hacl_Hash_Blake2b_blake2_params_s { + pub digest_length: u8, + pub key_length: u8, + pub fanout: u8, + pub depth: u8, + pub leaf_length: u32, + pub node_offset: u64, + pub node_depth: u8, + pub inner_length: u8, + pub salt: *mut u8, + pub personal: *mut u8, +} +pub type Hacl_Hash_Blake2b_blake2_params = Hacl_Hash_Blake2b_blake2_params_s; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct K____uint64_t___uint64_t__s { + pub fst: *mut u64, + pub snd: *mut u64, } -pub type Hacl_Hash_Blake2s_block_state_t = Hacl_Hash_Blake2s_block_state_t_s; +pub type K____uint64_t___uint64_t_ = K____uint64_t___uint64_t__s; #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct Hacl_Hash_Blake2s_state_t_s { - pub block_state: Hacl_Hash_Blake2s_block_state_t, +pub struct Hacl_Hash_Blake2b_block_state_t_s { + pub fst: u8, + pub snd: u8, + pub thd: K____uint64_t___uint64_t_, +} +pub type Hacl_Hash_Blake2b_block_state_t = Hacl_Hash_Blake2b_block_state_t_s; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Hacl_Hash_Blake2b_state_t_s { + pub block_state: Hacl_Hash_Blake2b_block_state_t, pub buf: *mut u8, pub total_len: u64, } -pub type Hacl_Hash_Blake2s_state_t = Hacl_Hash_Blake2s_state_t_s; +pub type Hacl_Hash_Blake2b_state_t = Hacl_Hash_Blake2b_state_t_s; extern "C" { - #[doc = "State allocation function when there is no key"] - pub fn Hacl_Hash_Blake2s_malloc() -> *mut Hacl_Hash_Blake2s_state_t; + #[doc = "General-purpose allocation function that gives control over all\nBlake2 parameters, including the key. Further resettings of the state SHALL be\ndone with `reset_with_params_and_key`, and SHALL feature the exact same values\nfor the `key_length` and `digest_length` fields as passed here. 
In other words,\nonce you commit to a digest and key length, the only way to change these\nparameters is to allocate a new object.\n\nThe caller must satisfy the following requirements.\n- The length of the key k MUST match the value of the field key_length in the\nparameters.\n- The key_length must not exceed 32 for S, 64 for B.\n- The digest_length must not exceed 32 for S, 64 for B."] + pub fn Hacl_Hash_Blake2b_malloc_with_params_and_key( + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ) -> *mut Hacl_Hash_Blake2b_state_t; } extern "C" { - #[doc = "Re-initialization function when there is no key"] - pub fn Hacl_Hash_Blake2s_reset(state: *mut Hacl_Hash_Blake2s_state_t); + #[doc = "Specialized allocation function that picks default values for all\nparameters, except for the key_length. Further resettings of the state SHALL be\ndone with `reset_with_key`, and SHALL feature the exact same key length `kk` as\npassed here. In other words, once you commit to a key length, the only way to\nchange this parameter is to allocate a new object.\n\nThe caller must satisfy the following requirements.\n- The key_length must not exceed 32 for S, 64 for B."] + pub fn Hacl_Hash_Blake2b_malloc_with_key(k: *mut u8, kk: u8) -> *mut Hacl_Hash_Blake2b_state_t; } extern "C" { - #[doc = "Update function when there is no key; 0 = success, 1 = max length exceeded"] - pub fn Hacl_Hash_Blake2s_update( - state: *mut Hacl_Hash_Blake2s_state_t, + #[doc = "Specialized allocation function that picks default values for all\nparameters, and has no key. Effectively, this is what you want if you intend to\nuse Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`."] + pub fn Hacl_Hash_Blake2b_malloc() -> *mut Hacl_Hash_Blake2b_state_t; +} +extern "C" { + #[doc = "General-purpose re-initialization function with parameters and\nkey. You cannot change digest_length or key_length, meaning those values in\nthe parameters object must be the same as originally decided via one of the\nmalloc functions. All other values of the parameter can be changed. The behavior\nis unspecified if you violate this precondition."] + pub fn Hacl_Hash_Blake2b_reset_with_key_and_params( + s: *mut Hacl_Hash_Blake2b_state_t, + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ); +} +extern "C" { + #[doc = "Specialized-purpose re-initialization function with no parameters,\nand a key. The key length must be the same as originally decided via your choice\nof malloc function. All other parameters are reset to their default values. The\noriginal call to malloc MUST have set digest_length to the default value. The\nbehavior is unspecified if you violate this precondition."] + pub fn Hacl_Hash_Blake2b_reset_with_key(s: *mut Hacl_Hash_Blake2b_state_t, k: *mut u8); +} +extern "C" { + #[doc = "Specialized-purpose re-initialization function with no parameters\nand no key. This is what you want if you intend to use Blake2 as a hash\nfunction. The key length and digest length must have been set to their\nrespective default values via your choice of malloc function (always true if you\nused `malloc`). All other parameters are reset to their default values. 
The\nbehavior is unspecified if you violate this precondition."] + pub fn Hacl_Hash_Blake2b_reset(s: *mut Hacl_Hash_Blake2b_state_t); +} +extern "C" { + #[doc = "Update function; 0 = success, 1 = max length exceeded"] + pub fn Hacl_Hash_Blake2b_update( + state: *mut Hacl_Hash_Blake2b_state_t, chunk: *mut u8, chunk_len: u32, ) -> Hacl_Streaming_Types_error_code; } extern "C" { - #[doc = "Finish function when there is no key"] - pub fn Hacl_Hash_Blake2s_digest(state: *mut Hacl_Hash_Blake2s_state_t, output: *mut u8); + #[doc = "Digest function. This function expects the `output` array to hold\nat least `digest_length` bytes, where `digest_length` was determined by your\nchoice of `malloc` function. Concretely, if you used `malloc` or\n`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default\ndigest length). If you used `malloc_with_params_and_key`, then the expected\nlength is whatever you chose for the `digest_length` field of your\nparameters."] + pub fn Hacl_Hash_Blake2b_digest(state: *mut Hacl_Hash_Blake2b_state_t, output: *mut u8); } extern "C" { #[doc = "Free state function when there is no key"] - pub fn Hacl_Hash_Blake2s_free(state: *mut Hacl_Hash_Blake2s_state_t); + pub fn Hacl_Hash_Blake2b_free(state: *mut Hacl_Hash_Blake2b_state_t); } extern "C" { - #[doc = "Write the BLAKE2s digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. Can be 0."] - pub fn Hacl_Hash_Blake2s_hash_with_key( + #[doc = "Copying. This preserves all parameters."] + pub fn Hacl_Hash_Blake2b_copy( + state: *mut Hacl_Hash_Blake2b_state_t, + ) -> *mut Hacl_Hash_Blake2b_state_t; +} +extern "C" { + #[doc = "Write the BLAKE2b digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. Can be 0."] + pub fn Hacl_Hash_Blake2b_hash_with_key( output: *mut u8, output_len: u32, input: *mut u8, @@ -711,48 +763,95 @@ extern "C" { key_len: u32, ); } +extern "C" { + #[doc = "Write the BLAKE2b digest of message `input` using key `key` and\nparameters `params` into `output`. The `key` array must be of length\n`params.key_length`. 
The `output` array must be of length\n`params.digest_length`."] + pub fn Hacl_Hash_Blake2b_hash_with_key_and_paramas( + output: *mut u8, + input: *mut u8, + input_len: u32, + params: Hacl_Hash_Blake2b_blake2_params, + key: *mut u8, + ); +} #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct Hacl_Hash_Blake2b_block_state_t_s { - pub fst: *mut u64, - pub snd: *mut u64, +pub struct K____uint32_t___uint32_t__s { + pub fst: *mut u32, + pub snd: *mut u32, } -pub type Hacl_Hash_Blake2b_block_state_t = Hacl_Hash_Blake2b_block_state_t_s; +pub type K____uint32_t___uint32_t_ = K____uint32_t___uint32_t__s; #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct Hacl_Hash_Blake2b_state_t_s { - pub block_state: Hacl_Hash_Blake2b_block_state_t, +pub struct Hacl_Hash_Blake2s_block_state_t_s { + pub fst: u8, + pub snd: u8, + pub thd: K____uint32_t___uint32_t_, +} +pub type Hacl_Hash_Blake2s_block_state_t = Hacl_Hash_Blake2s_block_state_t_s; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Hacl_Hash_Blake2s_state_t_s { + pub block_state: Hacl_Hash_Blake2s_block_state_t, pub buf: *mut u8, pub total_len: u64, } -pub type Hacl_Hash_Blake2b_state_t = Hacl_Hash_Blake2b_state_t_s; +pub type Hacl_Hash_Blake2s_state_t = Hacl_Hash_Blake2s_state_t_s; +extern "C" { + #[doc = "State allocation function when there are parameters and a key. The\nlength of the key k MUST match the value of the field key_length in the\nparameters. Furthermore, there is a static (not dynamically checked) requirement\nthat key_length does not exceed max_key (32 for S, 64 for B)."] + pub fn Hacl_Hash_Blake2s_malloc_with_params_and_key( + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ) -> *mut Hacl_Hash_Blake2s_state_t; +} +extern "C" { + #[doc = "State allocation function when there is just a custom key. All\nother parameters are set to their respective default values, meaning the output\nlength is the maximum allowed output (32 for S, 64 for B)."] + pub fn Hacl_Hash_Blake2s_malloc_with_key(k: *mut u8, kk: u8) -> *mut Hacl_Hash_Blake2s_state_t; +} extern "C" { #[doc = "State allocation function when there is no key"] - pub fn Hacl_Hash_Blake2b_malloc() -> *mut Hacl_Hash_Blake2b_state_t; + pub fn Hacl_Hash_Blake2s_malloc() -> *mut Hacl_Hash_Blake2s_state_t; } +extern "C" { + #[doc = "Re-initialization function. The reinitialization API is tricky --\nyou MUST reuse the same original parameters for digest (output) length and key\nlength."] + pub fn Hacl_Hash_Blake2s_reset_with_key_and_params( + s: *mut Hacl_Hash_Blake2s_state_t, + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ); +} +extern "C" { + #[doc = "Re-initialization function when there is a key. 
Note that the key\nsize is not allowed to change, which is why this function does not take a key\nlength -- the key has to be the same key size that was originally passed to\n`malloc_with_key`."] + pub fn Hacl_Hash_Blake2s_reset_with_key(s: *mut Hacl_Hash_Blake2s_state_t, k: *mut u8); } extern "C" { #[doc = "Re-initialization function when there is no key"] - pub fn Hacl_Hash_Blake2b_reset(state: *mut Hacl_Hash_Blake2b_state_t); + pub fn Hacl_Hash_Blake2s_reset(s: *mut Hacl_Hash_Blake2s_state_t); } extern "C" { #[doc = "Update function when there is no key; 0 = success, 1 = max length exceeded"] - pub fn Hacl_Hash_Blake2b_update( - state: *mut Hacl_Hash_Blake2b_state_t, + pub fn Hacl_Hash_Blake2s_update( + state: *mut Hacl_Hash_Blake2s_state_t, chunk: *mut u8, chunk_len: u32, ) -> Hacl_Streaming_Types_error_code; } extern "C" { #[doc = "Finish function when there is no key"] - pub fn Hacl_Hash_Blake2b_digest(state: *mut Hacl_Hash_Blake2b_state_t, output: *mut u8); + pub fn Hacl_Hash_Blake2s_digest(state: *mut Hacl_Hash_Blake2s_state_t, output: *mut u8); } extern "C" { #[doc = "Free state function when there is no key"] - pub fn Hacl_Hash_Blake2b_free(state: *mut Hacl_Hash_Blake2b_state_t); + pub fn Hacl_Hash_Blake2s_free(state: *mut Hacl_Hash_Blake2s_state_t); } extern "C" { - #[doc = "Write the BLAKE2b digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. Can be 0."] - pub fn Hacl_Hash_Blake2b_hash_with_key( + #[doc = "Copying. The key length (or absence thereof) must match between source and destination."] + pub fn Hacl_Hash_Blake2s_copy( + state: *mut Hacl_Hash_Blake2s_state_t, + ) -> *mut Hacl_Hash_Blake2s_state_t; +} +extern "C" { + #[doc = "Write the BLAKE2s digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. 
Can be 0."] + pub fn Hacl_Hash_Blake2s_hash_with_key( output: *mut u8, output_len: u32, input: *mut u8, @@ -761,6 +860,15 @@ extern "C" { key_len: u32, ); } +extern "C" { + pub fn Hacl_Hash_Blake2s_hash_with_key_and_paramas( + output: *mut u8, + input: *mut u8, + input_len: u32, + params: Hacl_Hash_Blake2b_blake2_params, + key: *mut u8, + ); +} extern "C" { pub fn EverCrypt_HMAC_is_supported_alg(uu___: Spec_Hash_Definitions_hash_alg) -> bool; } @@ -835,65 +943,81 @@ extern "C" { pub fn Hacl_Hash_SHA3_is_shake(s: *mut Hacl_Hash_SHA3_state_t) -> bool; } extern "C" { - pub fn Hacl_Hash_SHA3_shake128_hacl( - inputByteLen: u32, - input: *mut u8, - outputByteLen: u32, + pub fn Hacl_Hash_SHA3_absorb_inner_32(rateInBytes: u32, b: *mut u8, s: *mut u64); +} +extern "C" { + pub fn Hacl_Hash_SHA3_shake128( output: *mut u8, + outputByteLen: u32, + input: *mut u8, + inputByteLen: u32, ); } extern "C" { - pub fn Hacl_Hash_SHA3_shake256_hacl( - inputByteLen: u32, - input: *mut u8, - outputByteLen: u32, + pub fn Hacl_Hash_SHA3_shake256( output: *mut u8, + outputByteLen: u32, + input: *mut u8, + inputByteLen: u32, ); } extern "C" { - pub fn Hacl_Hash_SHA3_sha3_224(output: *mut u8, input: *mut u8, input_len: u32); + pub fn Hacl_Hash_SHA3_sha3_224(output: *mut u8, input: *mut u8, inputByteLen: u32); } extern "C" { - pub fn Hacl_Hash_SHA3_sha3_256(output: *mut u8, input: *mut u8, input_len: u32); + pub fn Hacl_Hash_SHA3_sha3_256(output: *mut u8, input: *mut u8, inputByteLen: u32); } extern "C" { - pub fn Hacl_Hash_SHA3_sha3_384(output: *mut u8, input: *mut u8, input_len: u32); + pub fn Hacl_Hash_SHA3_sha3_384(output: *mut u8, input: *mut u8, inputByteLen: u32); } extern "C" { - pub fn Hacl_Hash_SHA3_sha3_512(output: *mut u8, input: *mut u8, input_len: u32); + pub fn Hacl_Hash_SHA3_sha3_512(output: *mut u8, input: *mut u8, inputByteLen: u32); } extern "C" { - pub fn Hacl_Hash_SHA3_absorb_inner(rateInBytes: u32, block: *mut u8, s: *mut u64); + #[doc = "Allocate state buffer of 200-bytes"] + pub fn Hacl_Hash_SHA3_state_malloc() -> *mut u64; } extern "C" { - pub fn Hacl_Hash_SHA3_squeeze0( - s: *mut u64, - rateInBytes: u32, - outputByteLen: u32, - output: *mut u8, - ); + #[doc = "Free state buffer"] + pub fn Hacl_Hash_SHA3_state_free(s: *mut u64); } extern "C" { - pub fn Hacl_Hash_SHA3_keccak( - rate: u32, - capacity: u32, - inputByteLen: u32, + #[doc = "Absorb number of input blocks and write the output state\n\nThis function is intended to receive a hash state and input buffer.\nIt prcoesses an input of multiple of 168-bytes (SHAKE128 block size),\nany additional bytes of final partial block are ignored.\n\nThe argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25]\nThe argument `input` (IN) points to `inputByteLen` bytes of valid memory,\ni.e., uint8_t[inputByteLen]"] + pub fn Hacl_Hash_SHA3_shake128_absorb_nblocks( + state: *mut u64, input: *mut u8, - delimitedSuffix: u8, - outputByteLen: u32, + inputByteLen: u32, + ); +} +extern "C" { + #[doc = "Absorb a final partial block of input and write the output state\n\nThis function is intended to receive a hash state and input buffer.\nIt prcoesses a sequence of bytes at end of input buffer that is less\nthan 168-bytes (SHAKE128 block size),\nany bytes of full blocks at start of input buffer are ignored.\n\nThe argument `state` (IN/OUT) points to hash state, i.e., uint64_t[25]\nThe argument `input` (IN) points to `inputByteLen` bytes of valid memory,\ni.e., uint8_t[inputByteLen]\n\nNote: Full size of input buffer must be passed to `inputByteLen` 
+extern "C" { + #[doc = "Squeeze a hash state to output buffer\n\nThis function is intended to receive a hash state and output buffer.\nIt produces an output that is a multiple of 168 bytes (SHAKE128 block size);\nany additional bytes of a final partial block are ignored.\n\nThe argument `state` (IN) points to hash state, i.e., uint64_t[25]\nThe argument `output` (OUT) points to `outputByteLen` bytes of valid memory,\ni.e., uint8_t[outputByteLen]"] + pub fn Hacl_Hash_SHA3_shake128_squeeze_nblocks( + state: *mut u64, output: *mut u8, + outputByteLen: u32, ); } -pub type __m128i = [::std::os::raw::c_longlong; 2usize]; -pub type Lib_IntVector_Intrinsics_vec128 = __m128i; -pub type __m256i = [::std::os::raw::c_longlong; 4usize]; -pub type Lib_IntVector_Intrinsics_vec256 = __m256i; +pub type uint32x4_t = [u32; 4usize]; +pub type Lib_IntVector_Intrinsics_vec128 = uint32x4_t; #[repr(C)] #[derive(Debug, Copy, Clone)] -pub struct Hacl_Hash_Blake2s_Simd128_block_state_t_s { +pub struct K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128__s { pub fst: *mut Lib_IntVector_Intrinsics_vec128, pub snd: *mut Lib_IntVector_Intrinsics_vec128, } +pub type K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ = + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128__s; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Hacl_Hash_Blake2s_Simd128_block_state_t_s { + pub fst: u8, + pub snd: u8, + pub thd: K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_, +} pub type Hacl_Hash_Blake2s_Simd128_block_state_t = Hacl_Hash_Blake2s_Simd128_block_state_t_s; #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -903,13 +1027,42 @@ pub struct Hacl_Hash_Blake2s_Simd128_state_t_s { pub total_len: u64, } pub type Hacl_Hash_Blake2s_Simd128_state_t = Hacl_Hash_Blake2s_Simd128_state_t_s; +extern "C" { + #[doc = "State allocation function when there are parameters and a key. The\nlength of the key k MUST match the value of the field key_length in the\nparameters. Furthermore, there is a static (not dynamically checked) requirement\nthat key_length does not exceed max_key (32 for S, 64 for B)."] + pub fn Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key( + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ) -> *mut Hacl_Hash_Blake2s_Simd128_state_t; +} +extern "C" { + #[doc = "State allocation function when there is just a custom key. All\nother parameters are set to their respective default values, meaning the output\nlength is the maximum allowed output (32 for S, 64 for B)."] + pub fn Hacl_Hash_Blake2s_Simd128_malloc_with_key0( + k: *mut u8, + kk: u8, + ) -> *mut Hacl_Hash_Blake2s_Simd128_state_t; +} extern "C" { #[doc = "State allocation function when there is no key"] pub fn Hacl_Hash_Blake2s_Simd128_malloc() -> *mut Hacl_Hash_Blake2s_Simd128_state_t; } +extern "C" { + #[doc = "Re-initialization function. The reinitialization API is tricky --\nyou MUST reuse the same original parameters for digest (output) length and key\nlength."] + pub fn Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( + s: *mut Hacl_Hash_Blake2s_Simd128_state_t, + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ); +} +extern "C" { + #[doc = "Re-initialization function when there is a key. 
Note that the key\nsize is not allowed to change, which is why this function does not take a key\nlength -- the key has to be the same key size that was originally passed to\n`malloc_with_key`."] + pub fn Hacl_Hash_Blake2s_Simd128_reset_with_key( + s: *mut Hacl_Hash_Blake2s_Simd128_state_t, + k: *mut u8, + ); +} extern "C" { #[doc = "Re-initialization function when there is no key"] - pub fn Hacl_Hash_Blake2s_Simd128_reset(state: *mut Hacl_Hash_Blake2s_Simd128_state_t); + pub fn Hacl_Hash_Blake2s_Simd128_reset(s: *mut Hacl_Hash_Blake2s_Simd128_state_t); } extern "C" { #[doc = "Update function when there is no key; 0 = success, 1 = max length exceeded"] @@ -931,7 +1084,13 @@ extern "C" { pub fn Hacl_Hash_Blake2s_Simd128_free(state: *mut Hacl_Hash_Blake2s_Simd128_state_t); } extern "C" { - #[doc = "Write the BLAKE2s digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. Can be 0."] + #[doc = "Copying. The key length (or absence thereof) must match between source and destination."] + pub fn Hacl_Hash_Blake2s_Simd128_copy( + state: *mut Hacl_Hash_Blake2s_Simd128_state_t, + ) -> *mut Hacl_Hash_Blake2s_Simd128_state_t; +} +extern "C" { + #[doc = "Write the BLAKE2s digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. Can be 0."] pub fn Hacl_Hash_Blake2s_Simd128_hash_with_key( output: *mut u8, output_len: u32, input: *mut u8, @@ -941,11 +1100,29 @@ extern "C" { key_len: u32, ); } +extern "C" { + pub fn Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( + output: *mut u8, + input: *mut u8, + input_len: u32, + params: Hacl_Hash_Blake2b_blake2_params, + key: *mut u8, + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256__s { + pub fst: *mut *mut ::std::os::raw::c_void, + pub snd: *mut *mut ::std::os::raw::c_void, +} +pub type K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ = + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256__s; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct Hacl_Hash_Blake2b_Simd256_block_state_t_s { - pub fst: *mut Lib_IntVector_Intrinsics_vec256, - pub snd: *mut Lib_IntVector_Intrinsics_vec256, + pub fst: u8, + pub snd: u8, + pub thd: K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_, } pub type Hacl_Hash_Blake2b_Simd256_block_state_t = Hacl_Hash_Blake2b_Simd256_block_state_t_s; #[repr(C)] @@ -956,13 +1133,42 @@ pub struct Hacl_Hash_Blake2b_Simd256_state_t_s { pub total_len: u64, } pub type Hacl_Hash_Blake2b_Simd256_state_t = Hacl_Hash_Blake2b_Simd256_state_t_s; +extern "C" { + #[doc = "State allocation function when there are parameters and a key. 
The\nlength of the key k MUST match the value of the field key_length in the\nparameters. Furthermore, there is a static (not dynamically checked) requirement\nthat key_length does not exceed max_key (32 for S, 64 for B)."] + pub fn Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key( + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ) -> *mut Hacl_Hash_Blake2b_Simd256_state_t; +} +extern "C" { + #[doc = "State allocation function when there is just a custom key. All\nother parameters are set to their respective default values, meaning the output\nlength is the maximum allowed output (32 for S, 64 for B)."] + pub fn Hacl_Hash_Blake2b_Simd256_malloc_with_key0( + k: *mut u8, + kk: u8, + ) -> *mut Hacl_Hash_Blake2b_Simd256_state_t; +} extern "C" { #[doc = "State allocation function when there is no key"] pub fn Hacl_Hash_Blake2b_Simd256_malloc() -> *mut Hacl_Hash_Blake2b_Simd256_state_t; } +extern "C" { + #[doc = "Re-initialization function. The reinitialization API is tricky --\nyou MUST reuse the same original parameters for digest (output) length and key\nlength."] + pub fn Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( + s: *mut Hacl_Hash_Blake2b_Simd256_state_t, + p: *mut Hacl_Hash_Blake2b_blake2_params, + k: *mut u8, + ); +} +extern "C" { + #[doc = "Re-initialization function when there is a key. Note that the key\nsize is not allowed to change, which is why this function does not take a key\nlength -- the key has to be the same key size that was originally passed to\n`malloc_with_key`."] + pub fn Hacl_Hash_Blake2b_Simd256_reset_with_key( + s: *mut Hacl_Hash_Blake2b_Simd256_state_t, + k: *mut u8, + ); +} extern "C" { #[doc = "Re-initialization function when there is no key"] - pub fn Hacl_Hash_Blake2b_Simd256_reset(state: *mut Hacl_Hash_Blake2b_Simd256_state_t); + pub fn Hacl_Hash_Blake2b_Simd256_reset(s: *mut Hacl_Hash_Blake2b_Simd256_state_t); } extern "C" { #[doc = "Update function when there is no key; 0 = success, 1 = max length exceeded"] @@ -983,6 +1189,12 @@ extern "C" { #[doc = "Free state function when there is no key"] pub fn Hacl_Hash_Blake2b_Simd256_free(state: *mut Hacl_Hash_Blake2b_Simd256_state_t); } +extern "C" { + #[doc = "Copying. The key length (or absence thereof) must match between source and destination."] + pub fn Hacl_Hash_Blake2b_Simd256_copy( + state: *mut Hacl_Hash_Blake2b_Simd256_state_t, + ) -> *mut Hacl_Hash_Blake2b_Simd256_state_t; +} extern "C" { #[doc = "Write the BLAKE2b digest of message `input` using key `key` into `output`.\n\n@param output Pointer to `output_len` bytes of memory where the digest is written to.\n@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.\n@param input Pointer to `input_len` bytes of memory where the input message is read from.\n@param input_len Length of the input message.\n@param key Pointer to `key_len` bytes of memory where the key is read from.\n@param key_len Length of the key. 
Can be 0."] pub fn Hacl_Hash_Blake2b_Simd256_hash_with_key( @@ -994,6 +1206,15 @@ extern "C" { key_len: u32, ); } +extern "C" { + pub fn Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( + output: *mut u8, + input: *mut u8, + input_len: u32, + params: Hacl_Hash_Blake2b_blake2_params, + key: *mut u8, + ); +} #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct EverCrypt_Hash_state_s_s { diff --git a/rust/src/digest.rs b/rust/src/digest.rs index b7fb06f8..0156fb4b 100644 --- a/rust/src/digest.rs +++ b/rust/src/digest.rs @@ -420,11 +420,11 @@ pub fn hash(alg: Algorithm, data: &[u8]) -> Vec { pub fn shake128(data: &[u8]) -> [u8; BYTES] { let mut out = [0u8; BYTES]; unsafe { - Hacl_Hash_SHA3_shake128_hacl( - data.len() as u32, - data.as_ptr() as _, - BYTES as u32, + Hacl_Hash_SHA3_shake128( out.as_mut_ptr(), + BYTES as u32, + data.as_ptr() as _, + data.len() as u32, ); } out @@ -437,11 +437,11 @@ pub fn shake128(data: &[u8]) -> [u8; BYTES] { pub fn shake256(data: &[u8]) -> [u8; BYTES] { let mut out = [0u8; BYTES]; unsafe { - Hacl_Hash_SHA3_shake256_hacl( - data.len() as u32, - data.as_ptr() as _, - BYTES as u32, + Hacl_Hash_SHA3_shake256( out.as_mut_ptr(), + BYTES as u32, + data.as_ptr() as _, + data.len() as u32, ); } out diff --git a/rust/src/hazmat/sha3.rs b/rust/src/hazmat/sha3.rs index c8ecc913..0c369b47 100644 --- a/rust/src/hazmat/sha3.rs +++ b/rust/src/hazmat/sha3.rs @@ -1,6 +1,6 @@ use hacl_sys::{ - Hacl_Hash_SHA3_sha3_224, Hacl_Hash_SHA3_sha3_256, Hacl_Hash_SHA3_sha3_384, Hacl_Hash_SHA3_sha3_512, - Hacl_Hash_SHA3_shake128_hacl, Hacl_Hash_SHA3_shake256_hacl, + Hacl_Hash_SHA3_sha3_224, Hacl_Hash_SHA3_sha3_256, Hacl_Hash_SHA3_sha3_384, + Hacl_Hash_SHA3_sha3_512, Hacl_Hash_SHA3_shake128, Hacl_Hash_SHA3_shake256, }; /// SHA3 224 @@ -59,11 +59,11 @@ pub fn sha512(payload: &[u8]) -> [u8; 64] { pub fn shake128(data: &[u8]) -> [u8; BYTES] { let mut out = [0u8; BYTES]; unsafe { - Hacl_Hash_SHA3_shake128_hacl( - data.len() as u32, - data.as_ptr() as _, - BYTES as u32, + Hacl_Hash_SHA3_shake128( out.as_mut_ptr(), + BYTES as u32, + data.as_ptr() as _, + data.len() as u32, ); } out @@ -76,11 +76,11 @@ pub fn shake128(data: &[u8]) -> [u8; BYTES] { pub fn shake256(data: &[u8]) -> [u8; BYTES] { let mut out = [0u8; BYTES]; unsafe { - Hacl_Hash_SHA3_shake256_hacl( - data.len() as u32, - data.as_ptr() as _, - BYTES as u32, + Hacl_Hash_SHA3_shake256( out.as_mut_ptr(), + BYTES as u32, + data.as_ptr() as _, + data.len() as u32, ); } out diff --git a/src/msvc/Hacl_Hash_Blake2b.c b/src/msvc/Hacl_Hash_Blake2b.c index 68de8340..d490a1a5 100644 --- a/src/msvc/Hacl_Hash_Blake2b.c +++ b/src/msvc/Hacl_Hash_Blake2b.c @@ -76,22 +76,22 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl uint64_t *r1 = m_st + 4U; uint64_t *r20 = m_st + 8U; uint64_t *r30 = m_st + 12U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = 
Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; uint64_t uu____0 = m_w[s2]; uint64_t uu____1 = m_w[s4]; uint64_t uu____2 = m_w[s6]; @@ -474,19 +474,27 @@ update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totl void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 64U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; uint64_t tmp[8U] = { 0U }; uint64_t *r0 = hash; uint64_t *r1 = hash + 4U; uint64_t *r2 = hash + 8U; uint64_t *r3 = hash + 12U; - uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U]; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; r2[0U] = iv0; r2[1U] = iv1; r2[2U] = iv2; @@ -495,14 +503,88 @@ void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = iv6; r3[3U] = iv7; - uint8_t salt[16U] = { 0U }; - uint8_t personal[16U] = { 0U }; - Hacl_Hash_Blake2s_blake2_params - p = - { - .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, - .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal - }; + uint8_t kk1 = (uint8_t)kk; + uint8_t nn1 = (uint8_t)nn; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os 
= tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk1 + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; +} + +static void init_with_params(uint64_t *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = hash; + uint64_t *r1 = hash + 4U; + uint64_t *r2 = hash + 8U; + uint64_t *r3 = hash + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk = p.key_length; + uint8_t nn = p.digest_length; KRML_MAYBE_FOR2(i, 0U, 2U, @@ -679,40 +761,220 @@ void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash) Lib_Memzero0_memzero(b, 64U, uint8_t, void *); } -/** - State allocation function when there is no key -*/ -Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void) +static Hacl_Hash_Blake2b_state_t +*malloc_raw( + Hacl_Hash_Blake2b_index kk, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); - Hacl_Hash_Blake2b_block_state_t block_state = { .fst = wv, .snd = b }; + Hacl_Hash_Blake2b_block_state_t + block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + uint8_t kk10 = kk.key_length; + uint32_t ite; + if (kk10 != 0U) + { + ite = 128U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2b_state_t - s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2b_state_t *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t)); p[0U] = s; - Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U); + Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; + uint8_t kk1 = p1->key_length; + uint8_t nn = p1->digest_length; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + 
Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; + init_with_params(block_state.thd.snd, pv); return p; } /** - Re-initialization function when there is no key + General-purpose allocation function that gives control over all +Blake2 parameters, including the key. Further resettings of the state SHALL be +done with `reset_with_params_and_key`, and SHALL feature the exact same values +for the `key_length` and `digest_length` fields as passed here. In other words, +once you commit to a digest and key length, the only way to change these +parameters is to allocate a new object. + +The caller must satisfy the following requirements. +- The length of the key k MUST match the value of the field key_length in the + parameters. +- The key_length must not exceed 32 for S, 64 for B. +- The digest_length must not exceed 32 for S, 64 for B. + */ -void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state) +Hacl_Hash_Blake2b_state_t +*Hacl_Hash_Blake2b_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +{ + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + Hacl_Hash_Blake2b_index + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; + return + malloc_raw(i1, + ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Specialized allocation function that picks default values for all +parameters, except for the key_length. Further resettings of the state SHALL be +done with `reset_with_key`, and SHALL feature the exact same key length `kk` as +passed here. In other words, once you commit to a key length, the only way to +change this parameter is to allocate a new object. + +The caller must satisfy the following requirements. +- The key_length must not exceed 32 for S, 64 for B. + +*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc_with_key(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 64U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + Hacl_Hash_Blake2b_state_t *s = Hacl_Hash_Blake2b_malloc_with_params_and_key(&p0, k); + return s; +} + +/** + Specialized allocation function that picks default values for all +parameters, and has no key. Effectively, this is what you want if you intend to +use Blake2 as a hash function. Further resettings of the state SHALL be done with `reset`. 
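+
+As an illustration only, a minimal sketch of the intended default usage
+(hypothetical `data`/`data_len` supplied by the caller; the error code
+returned by `update` is ignored here for brevity):
+
+  uint8_t tag[64U];
+  Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
+  Hacl_Hash_Blake2b_update(st, data, data_len);
+  Hacl_Hash_Blake2b_digest(st, tag);
+  Hacl_Hash_Blake2b_free(st);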
+*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void) +{ + return Hacl_Hash_Blake2b_malloc_with_key(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_state_t *s) +{ + Hacl_Hash_Blake2b_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2b_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 128U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2b_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } /** - Update function when there is no key; 0 = success, 1 = max length exceeded + General-purpose re-initialization function with parameters and +key. You cannot change digest_length or key_length, meaning those values in +the parameters object must be the same as originally decided via one of the +malloc functions. All other values of the parameter can be changed. The behavior +is unspecified if you violate this precondition. +*/ +void +Hacl_Hash_Blake2b_reset_with_key_and_params( + Hacl_Hash_Blake2b_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Specialized-purpose re-initialization function with no parameters, +and a key. The key length must be the same as originally decided via your choice +of malloc function. All other parameters are reset to their default values. The +original call to malloc MUST have set digest_length to the default value. The +behavior is unspecified if you violate this precondition. +*/ +void Hacl_Hash_Blake2b_reset_with_key(Hacl_Hash_Blake2b_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Specialized-purpose re-initialization function with no parameters +and no key. 
This is what you want if you intend to use Blake2 as a hash +function. The key length and digest length must have been set to their +respective default values via your choice of malloc function (always true if you +used `malloc`). All other parameters are reset to their default values. The +behavior is unspecified if you violate this precondition. +*/ +void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *s) +{ + Hacl_Hash_Blake2b_reset_with_key(s, NULL); +} + +/** + Update function; 0 = success, 1 = max length exceeded */ Hacl_Streaming_Types_error_code Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len) @@ -778,8 +1040,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_update_multi(128U, wv, @@ -802,8 +1065,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_update_multi(data1_len, wv, @@ -869,8 +1133,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_update_multi(128U, wv, @@ -894,8 +1159,9 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - uint64_t *wv = block_state1.fst; - uint64_t *hash = block_state1.snd; + K____uint64_t___uint64_t_ acc = block_state1.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_update_multi(data1_len, wv, @@ -919,10 +1185,20 @@ Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint3 } /** - Finish function when there is no key + Digest function. This function expects the `output` array to hold +at least `digest_length` bytes, where `digest_length` was determined by your +choice of `malloc` function. Concretely, if you used `malloc` or +`malloc_with_key`, then the expected length is 32 for S, or 64 for B (default +digest length). If you used `malloc_with_params_and_key`, then the expected +length is whatever you chose for the `digest_length` field of your +parameters. 
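+
+For example, with the default 64-byte digest length (hypothetical state `st`
+that has already been fed input via `update`):
+
+  uint8_t out[64U];
+  Hacl_Hash_Blake2b_digest(st, out);
+
+As the definition below shows, `digest` works on a temporary copy of the
+block state, so the original state remains usable for further `update` calls.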
*/ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) { + Hacl_Hash_Blake2b_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2b_state_t scrut = *state; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -939,9 +1215,11 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) uint8_t *buf_1 = buf_; uint64_t wv0[16U] = { 0U }; uint64_t b[16U] = { 0U }; - Hacl_Hash_Blake2b_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - uint64_t *src_b = block_state.snd; - uint64_t *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2b_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + uint64_t *src_b = block_state.thd.snd; + uint64_t *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -955,8 +1233,9 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - uint64_t *wv1 = tmp_block_state.fst; - uint64_t *hash0 = tmp_block_state.snd; + K____uint64_t___uint64_t_ acc0 = tmp_block_state.thd; + uint64_t *wv1 = acc0.fst; + uint64_t *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2b_update_multi(0U, wv1, @@ -965,15 +1244,17 @@ void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output) buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - uint64_t *wv = tmp_block_state.fst; - uint64_t *hash = tmp_block_state.snd; + K____uint64_t___uint64_t_ acc = tmp_block_state.thd; + uint64_t *wv = acc.fst; + uint64_t *hash = acc.snd; Hacl_Hash_Blake2b_update_last(r, wv, hash, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - Hacl_Hash_Blake2b_finish(64U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2b_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -984,14 +1265,43 @@ void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state) Hacl_Hash_Blake2b_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state; - uint64_t *wv = block_state.fst; - uint64_t *b = block_state.snd; + uint64_t *b = block_state.thd.snd; + uint64_t *wv = block_state.thd.fst; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. This preserves all parameters. 
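+
+For example, a sketch of forking one streaming computation to hash two
+messages that share a common prefix (all buffers and lengths hypothetical):
+
+  Hacl_Hash_Blake2b_update(st, prefix, prefix_len);
+  Hacl_Hash_Blake2b_state_t *st2 = Hacl_Hash_Blake2b_copy(st);
+  Hacl_Hash_Blake2b_update(st, a, a_len);   // st now hashes prefix || a
+  Hacl_Hash_Blake2b_update(st2, b, b_len);  // st2 now hashes prefix || b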
+*/ +Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_copy(Hacl_Hash_Blake2b_state_t *state) +{ + Hacl_Hash_Blake2b_state_t scrut = *state; + Hacl_Hash_Blake2b_block_state_t block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); + memcpy(buf, buf0, 128U * sizeof (uint8_t)); + uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); + uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t)); + Hacl_Hash_Blake2b_block_state_t + block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; + uint64_t *src_b = block_state0.thd.snd; + uint64_t *dst_b = block_state.thd.snd; + memcpy(dst_b, src_b, 16U * sizeof (uint64_t)); + Hacl_Hash_Blake2b_state_t + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Hash_Blake2b_state_t + *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t)); + p[0U] = s; + return p; +} + /** Write the BLAKE2b digest of message `input` using key `key` into `output`. @@ -1021,3 +1331,105 @@ Hacl_Hash_Blake2b_hash_with_key( Lib_Memzero0_memzero(b, 16U, uint64_t, void *); } +/** +Write the BLAKE2b digest of message `input` using key `key` and +parameters `params` into `output`. The `key` array must be of length +`params.key_length`. The `output` array must be of length +`params.digest_length`. +*/ +void +Hacl_Hash_Blake2b_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + uint64_t b[16U] = { 0U }; + uint64_t b1[16U] = { 0U }; + uint64_t tmp[8U] = { 0U }; + uint64_t *r0 = b; + uint64_t *r1 = b + 4U; + uint64_t *r2 = b + 8U; + uint64_t *r3 = b + 12U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + uint8_t kk = params.key_length; + uint8_t nn = params.digest_length; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ + ((uint64_t)params.fanout + << 16U + ^ ((uint64_t)params.depth << 24U ^ (uint64_t)params.leaf_length << 32U))); + tmp[1U] = params.node_offset; + tmp[2U] = (uint64_t)params.node_depth ^ (uint64_t)params.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = 
iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2b_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 16U, uint64_t, void *); + Lib_Memzero0_memzero(b, 16U, uint64_t, void *); +} + diff --git a/src/msvc/Hacl_Hash_Blake2b_Simd256.c b/src/msvc/Hacl_Hash_Blake2b_Simd256.c index 7aea4d42..0afd93bc 100644 --- a/src/msvc/Hacl_Hash_Blake2b_Simd256.c +++ b/src/msvc/Hacl_Hash_Blake2b_Simd256.c @@ -78,22 +78,22 @@ update_block( Lib_IntVector_Intrinsics_vec256 *r1 = m_st + 1U; Lib_IntVector_Intrinsics_vec256 *r20 = m_st + 2U; Lib_IntVector_Intrinsics_vec256 *r30 = m_st + 3U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); @@ -215,29 +215,100 @@ update_block( void Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 64U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, 
.inner_length = 0U, .salt = salt, .personal = personal + }; uint64_t tmp[8U] = { 0U }; Lib_IntVector_Intrinsics_vec256 *r0 = hash; Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U; - uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U]; - uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U]; - uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U]; - uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U]; - uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U]; - uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U]; - uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U]; - uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U]; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - uint8_t salt[16U] = { 0U }; - uint8_t personal[16U] = { 0U }; - Hacl_Hash_Blake2s_blake2_params - p = - { - .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, - .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal - }; + uint8_t kk1 = (uint8_t)kk; + uint8_t nn1 = (uint8_t)nn; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn1 + ^ + ((uint64_t)kk1 + << 8U + ^ ((uint64_t)p.fanout << 16U ^ ((uint64_t)p.depth << 24U ^ (uint64_t)p.leaf_length << 32U))); + tmp[1U] = p.node_offset; + tmp[2U] = (uint64_t)p.node_depth ^ (uint64_t)p.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); +} + +static void +init_with_params(Lib_IntVector_Intrinsics_vec256 *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = hash; + Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + 
r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
+  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
+  uint8_t kk = p.key_length;
+  uint8_t nn = p.digest_length;
   KRML_MAYBE_FOR2(i,
     0U,
     2U,
@@ -521,10 +592,11 @@ Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void)
   return buf;
 }
 
-/**
-  State allocation function when there is no key
-*/
-Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
+static Hacl_Hash_Blake2b_Simd256_state_t
+*malloc_raw(
+  Hacl_Hash_Blake2b_index kk,
+  K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key
+)
 {
   uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec256
@@ -537,33 +609,199 @@ Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
     (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
       sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
   memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2b_Simd256_block_state_t
+  block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } };
+  uint8_t kk10 = kk.key_length;
+  uint32_t ite;
+  if (kk10 != 0U)
+  {
+    ite = 128U;
+  }
+  else
+  {
+    ite = 0U;
+  }
   Hacl_Hash_Blake2b_Simd256_state_t
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite };
   Hacl_Hash_Blake2b_Simd256_state_t
   *p =
     (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof (
         Hacl_Hash_Blake2b_Simd256_state_t
       ));
   p[0U] = s;
-  Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+  Hacl_Hash_Blake2b_blake2_params *p1 = key.fst;
+  uint8_t kk1 = p1->key_length;
+  uint8_t nn = p1->digest_length;
+  Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn };
+  uint32_t kk2 = (uint32_t)i.key_length;
+  uint8_t *k_1 = key.snd;
+  if (!(kk2 == 0U))
+  {
+    uint8_t *sub_b = buf + kk2;
+    memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t));
+    memcpy(buf, k_1, kk2 * sizeof (uint8_t));
+  }
+  Hacl_Hash_Blake2b_blake2_params pv = p1[0U];
+  init_with_params(block_state.thd.snd, pv);
   return p;
 }
 
 /**
-  Re-initialization function when there is no key
+  State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
+*/
+Hacl_Hash_Blake2b_Simd256_state_t
+*Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+)
+{
+  Hacl_Hash_Blake2b_blake2_params pv = p[0U];
+  Hacl_Hash_Blake2b_index
+  i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length };
+  return
+    malloc_raw(i1,
+      ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k }));
+}
+
+/**
+  State allocation function when there is just a custom key. All
+other parameters are set to their respective default values, meaning the output
+length is the maximum allowed output (32 for S, 64 for B).
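+
+A minimal caller-side sketch (illustrative only; a 32-byte key is assumed):
+
+  uint8_t key[32U] = { 0U };
+  Hacl_Hash_Blake2b_Simd256_state_t
+  *s = Hacl_Hash_Blake2b_Simd256_malloc_with_key0(key, 32U);
+  ...
+  Hacl_Hash_Blake2b_Simd256_free(s);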
+*/ +Hacl_Hash_Blake2b_Simd256_state_t +*Hacl_Hash_Blake2b_Simd256_malloc_with_key0(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 64U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); + uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params + *p0 = + (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); + p0[0U] = p; + Hacl_Hash_Blake2b_Simd256_state_t + *s = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(p0, k); + Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; + KRML_HOST_FREE(p1.salt); + KRML_HOST_FREE(p1.personal); + KRML_HOST_FREE(p0); + return s; +} + +/** + State allocation function when there is no key */ -void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state) +Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void) +{ + return Hacl_Hash_Blake2b_Simd256_malloc_with_key0(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2b_Simd256_state_t *s) +{ + Hacl_Hash_Blake2b_Simd256_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2b_Simd256_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (128U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 128U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2b_Simd256_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2b_Simd256_reset_with_key_and_params( + Hacl_Hash_Blake2b_Simd256_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Re-initialization function when there is a key. 
Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2b_Simd256_reset_with_key(Hacl_Hash_Blake2b_Simd256_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[16U] = { 0U }; + uint8_t personal[16U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Re-initialization function when there is no key +*/ +void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *s) +{ + Hacl_Hash_Blake2b_Simd256_reset_with_key(s, NULL); +} + /** Update function when there is no key; 0 = success, 1 = max length exceeded */ @@ -635,8 +873,10 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_Simd256_update_multi(128U, wv, @@ -659,8 +899,9 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_Simd256_update_multi(data1_len, wv, @@ -726,8 +967,10 @@ Hacl_Hash_Blake2b_Simd256_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2b_Simd256_update_multi(128U, wv, @@ -751,8 +994,9 @@ Hacl_Hash_Blake2b_Simd256_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; uint32_t nb = data1_len / 128U; Hacl_Hash_Blake2b_Simd256_update_multi(data1_len, wv, @@ -781,6 +1025,10 @@ Hacl_Hash_Blake2b_Simd256_update( void Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output) { + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + 
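/* The first two fields of the block state cache the key and digest lengths;
+     they size the temporary state below and the number of bytes finish writes. */
+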
Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -797,9 +1045,11 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 uint8_t *buf_1 = buf_; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; - Hacl_Hash_Blake2b_Simd256_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd; - Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2b_Simd256_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -813,8 +1063,10 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc0 = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec256 *wv1 = acc0.fst; + Lib_IntVector_Intrinsics_vec256 *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2b_Simd256_update_multi(0U, wv1, @@ -823,15 +1075,18 @@ Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8 buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec256___Lib_IntVector_Intrinsics_vec256_ + acc = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec256 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec256 *hash = acc.snd; Hacl_Hash_Blake2b_Simd256_update_last(r, wv, hash, FStar_UInt128_uint64_to_uint128(prev_len_last), r, buf_last); - Hacl_Hash_Blake2b_Simd256_finish(64U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2b_Simd256_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -842,14 +1097,55 @@ void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state) Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst; - Lib_IntVector_Intrinsics_vec256 *b = block_state.snd; + Lib_IntVector_Intrinsics_vec256 *b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec256 *wv = block_state.thd.fst; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. The key length (or absence thereof) must match between source and destination. 
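+
+The copy is deep and must eventually be released with
+Hacl_Hash_Blake2b_Simd256_free. A typical use is forking a stream to obtain an
+intermediate digest (illustrative; md is a caller-provided 64-byte buffer):
+
+  Hacl_Hash_Blake2b_Simd256_state_t *s2 = Hacl_Hash_Blake2b_Simd256_copy(s1);
+  Hacl_Hash_Blake2b_Simd256_digest(s2, md);
+  Hacl_Hash_Blake2b_Simd256_free(s2);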
+*/ +Hacl_Hash_Blake2b_Simd256_state_t +*Hacl_Hash_Blake2b_Simd256_copy(Hacl_Hash_Blake2b_Simd256_state_t *state) +{ + Hacl_Hash_Blake2b_Simd256_state_t scrut = *state; + Hacl_Hash_Blake2b_Simd256_block_state_t block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t)); + memcpy(buf, buf0, 128U * sizeof (uint8_t)); + Lib_IntVector_Intrinsics_vec256 + *wv = + (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, + sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); + memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 + *b = + (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, + sizeof (Lib_IntVector_Intrinsics_vec256) * 4U); + memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Hacl_Hash_Blake2b_Simd256_block_state_t + block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; + Lib_IntVector_Intrinsics_vec256 *src_b = block_state0.thd.snd; + Lib_IntVector_Intrinsics_vec256 *dst_b = block_state.thd.snd; + memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Hacl_Hash_Blake2b_Simd256_state_t + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Hash_Blake2b_Simd256_state_t + *p = + (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof ( + Hacl_Hash_Blake2b_Simd256_state_t + )); + p[0U] = s; + return p; +} + /** Write the BLAKE2b digest of message `input` using key `key` into `output`. @@ -879,3 +1175,87 @@ Hacl_Hash_Blake2b_Simd256_hash_with_key( Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256, void *); } +void +Hacl_Hash_Blake2b_Simd256_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U }; + uint64_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec256 *r0 = b; + Lib_IntVector_Intrinsics_vec256 *r1 = b + 1U; + Lib_IntVector_Intrinsics_vec256 *r2 = b + 2U; + Lib_IntVector_Intrinsics_vec256 *r3 = b + 3U; + uint64_t iv0 = Hacl_Hash_Blake2b_ivTable_B[0U]; + uint64_t iv1 = Hacl_Hash_Blake2b_ivTable_B[1U]; + uint64_t iv2 = Hacl_Hash_Blake2b_ivTable_B[2U]; + uint64_t iv3 = Hacl_Hash_Blake2b_ivTable_B[3U]; + uint64_t iv4 = Hacl_Hash_Blake2b_ivTable_B[4U]; + uint64_t iv5 = Hacl_Hash_Blake2b_ivTable_B[5U]; + uint64_t iv6 = Hacl_Hash_Blake2b_ivTable_B[6U]; + uint64_t iv7 = Hacl_Hash_Blake2b_ivTable_B[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); + uint8_t kk = params.key_length; + uint8_t nn = params.digest_length; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint64_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 8U; + uint64_t u = load64_le(bj); + uint64_t r = u; + uint64_t x = r; + os[i] = x;); + tmp[0U] = + (uint64_t)nn + ^ + ((uint64_t)kk + << 8U + ^ + ((uint64_t)params.fanout + << 16U + ^ ((uint64_t)params.depth << 24U ^ 
(uint64_t)params.leaf_length << 32U))); + tmp[1U] = params.node_offset; + tmp[2U] = (uint64_t)params.node_depth ^ (uint64_t)params.inner_length << 8U; + tmp[3U] = 0ULL; + uint64_t tmp0 = tmp[0U]; + uint64_t tmp1 = tmp[1U]; + uint64_t tmp2 = tmp[2U]; + uint64_t tmp3 = tmp[3U]; + uint64_t tmp4 = tmp[4U]; + uint64_t tmp5 = tmp[5U]; + uint64_t tmp6 = tmp[6U]; + uint64_t tmp7 = tmp[7U]; + uint64_t iv0_ = iv0 ^ tmp0; + uint64_t iv1_ = iv1 ^ tmp1; + uint64_t iv2_ = iv2 ^ tmp2; + uint64_t iv3_ = iv3 ^ tmp3; + uint64_t iv4_ = iv4 ^ tmp4; + uint64_t iv5_ = iv5 ^ tmp5; + uint64_t iv6_ = iv6 ^ tmp6; + uint64_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4_, iv5_, iv6_, iv7_); + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2b_Simd256_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256, void *); +} + diff --git a/src/msvc/Hacl_Hash_Blake2s.c b/src/msvc/Hacl_Hash_Blake2s.c index 37fabb67..6e19d83d 100644 --- a/src/msvc/Hacl_Hash_Blake2s.c +++ b/src/msvc/Hacl_Hash_Blake2s.c @@ -77,22 +77,22 @@ update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * uint32_t *r1 = m_st + 4U; uint32_t *r20 = m_st + 8U; uint32_t *r30 = m_st + 12U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; uint32_t uu____0 = m_w[s2]; uint32_t uu____1 = m_w[s4]; uint32_t uu____2 = m_w[s6]; @@ -475,19 +475,104 @@ 
update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t * void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) { + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, + .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal + }; uint32_t tmp[8U] = { 0U }; uint32_t *r0 = hash; uint32_t *r1 = hash + 4U; uint32_t *r2 = hash + 8U; uint32_t *r3 = hash + 12U; - uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U]; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)(uint8_t)nn + ^ ((uint32_t)(uint8_t)kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; +} + +static void init_with_params(uint32_t *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 = hash; + uint32_t *r1 = hash + 4U; + uint32_t *r2 = hash + 8U; + uint32_t *r3 = hash + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; r2[0U] = iv0; r2[1U] = iv1; r2[2U] = iv2; @@ -496,14 +581,6 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) r3[1U] = iv5; r3[2U] = 
iv6; r3[3U] = iv7; - uint8_t salt[8U] = { 0U }; - uint8_t personal[8U] = { 0U }; - Hacl_Hash_Blake2s_blake2_params - p = - { - .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, - .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal - }; KRML_MAYBE_FOR2(i, 0U, 2U, @@ -524,7 +601,9 @@ void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) uint32_t r = u; uint32_t x = r; os[i] = x;); - tmp[0U] = nn ^ (kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[0U] = + (uint32_t)p.digest_length + ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); tmp[1U] = p.leaf_length; tmp[2U] = (uint32_t)p.node_offset; tmp[3U] = @@ -667,38 +746,200 @@ void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash) Lib_Memzero0_memzero(b, 32U, uint8_t, void *); } -/** - State allocation function when there is no key -*/ -Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) +static Hacl_Hash_Blake2s_state_t +*malloc_raw( + Hacl_Hash_Blake2b_index kk, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); - Hacl_Hash_Blake2s_block_state_t block_state = { .fst = wv, .snd = b }; + Hacl_Hash_Blake2s_block_state_t + block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } }; + uint8_t kk10 = kk.key_length; + uint32_t ite; + if (kk10 != 0U) + { + ite = 64U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2s_state_t - s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; Hacl_Hash_Blake2s_state_t *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t)); p[0U] = s; - Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U); + Hacl_Hash_Blake2b_blake2_params *p1 = key.fst; + uint8_t kk1 = p1->key_length; + uint8_t nn = p1->digest_length; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p1[0U]; + init_with_params(block_state.thd.snd, pv); return p; } /** - Re-initialization function when there is no key + State allocation function when there are parameters and a key. The +length of the key k MUST match the value of the field key_length in the +parameters. Furthermore, there is a static (not dynamically checked) requirement +that key_length does not exceed max_key (32 for S, 64 for B).) +*/ +Hacl_Hash_Blake2s_state_t +*Hacl_Hash_Blake2s_malloc_with_params_and_key(Hacl_Hash_Blake2b_blake2_params *p, uint8_t *k) +{ + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + Hacl_Hash_Blake2b_index + i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length }; + return + malloc_raw(i1, + ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + State allocation function when there is just a custom key. All +other parameters are set to their respective default values, meaning the output +length is the maximum allowed output (32 for S, 64 for B). 
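+
+For example (illustrative; the key may be at most 32 bytes here):
+
+  uint8_t key[16U] = { 0U };
+  Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_key(key, 16U);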
+*/ +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc_with_key(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 32U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params + *p0 = + (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); + p0[0U] = p; + Hacl_Hash_Blake2s_state_t *s = Hacl_Hash_Blake2s_malloc_with_params_and_key(p0, k); + Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; + KRML_HOST_FREE(p1.salt); + KRML_HOST_FREE(p1.personal); + KRML_HOST_FREE(p0); + return s; +} + +/** + State allocation function when there is no key */ -void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state) +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void) +{ + return Hacl_Hash_Blake2s_malloc_with_key(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_state_t *s) +{ + Hacl_Hash_Blake2s_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2s_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2s_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 64U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2s_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2s_reset_with_key_and_params( + Hacl_Hash_Blake2s_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Re-initialization function when there is a key. 
Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2s_reset_with_key(Hacl_Hash_Blake2s_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Re-initialization function when there is no key +*/ +void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *s) +{ + Hacl_Hash_Blake2s_reset_with_key(s, NULL); +} + /** Update function when there is no key; 0 = success, 1 = max length exceeded */ @@ -766,8 +1007,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -785,8 +1027,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -847,8 +1090,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -867,8 +1111,9 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - uint32_t *wv = block_state1.fst; - uint32_t *hash = block_state1.snd; + K____uint32_t___uint32_t_ acc = block_state1.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -891,6 +1136,10 @@ Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint3 */ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) { + Hacl_Hash_Blake2s_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2s_state_t scrut = *state; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -907,9 +1156,11 @@ void 
Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) uint8_t *buf_1 = buf_; uint32_t wv0[16U] = { 0U }; uint32_t b[16U] = { 0U }; - Hacl_Hash_Blake2s_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - uint32_t *src_b = block_state.snd; - uint32_t *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2s_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + uint32_t *src_b = block_state.thd.snd; + uint32_t *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -923,15 +1174,18 @@ void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output) } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - uint32_t *wv1 = tmp_block_state.fst; - uint32_t *hash0 = tmp_block_state.snd; + K____uint32_t___uint32_t_ acc0 = tmp_block_state.thd; + uint32_t *wv1 = acc0.fst; + uint32_t *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - uint32_t *wv = tmp_block_state.fst; - uint32_t *hash = tmp_block_state.snd; + K____uint32_t___uint32_t_ acc = tmp_block_state.thd; + uint32_t *wv = acc.fst; + uint32_t *hash = acc.snd; Hacl_Hash_Blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last); - Hacl_Hash_Blake2s_finish(32U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2s_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -942,19 +1196,48 @@ void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state) Hacl_Hash_Blake2s_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state; - uint32_t *wv = block_state.fst; - uint32_t *b = block_state.snd; + uint32_t *b = block_state.thd.snd; + uint32_t *wv = block_state.thd.fst; KRML_HOST_FREE(wv); KRML_HOST_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. The key length (or absence thereof) must match between source and destination. +*/ +Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_copy(Hacl_Hash_Blake2s_state_t *state) +{ + Hacl_Hash_Blake2s_state_t scrut = *state; + Hacl_Hash_Blake2s_block_state_t block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t)); + memcpy(buf, buf0, 64U * sizeof (uint8_t)); + uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); + uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t)); + Hacl_Hash_Blake2s_block_state_t + block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } }; + uint32_t *src_b = block_state0.thd.snd; + uint32_t *dst_b = block_state.thd.snd; + memcpy(dst_b, src_b, 16U * sizeof (uint32_t)); + Hacl_Hash_Blake2s_state_t + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Hash_Blake2s_state_t + *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t)); + p[0U] = s; + return p; +} + /** Write the BLAKE2s digest of message `input` using key `key` into `output`. @param output Pointer to `output_len` bytes of memory where the digest is written to. 
-@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32. +@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64. @param input Pointer to `input_len` bytes of memory where the input message is read from. @param input_len Length of the input message. @param key Pointer to `key_len` bytes of memory where the key is read from. @@ -979,3 +1262,96 @@ Hacl_Hash_Blake2s_hash_with_key( Lib_Memzero0_memzero(b, 16U, uint32_t, void *); } +void +Hacl_Hash_Blake2s_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + uint32_t b[16U] = { 0U }; + uint32_t b1[16U] = { 0U }; + uint32_t tmp[8U] = { 0U }; + uint32_t *r0 = b; + uint32_t *r1 = b + 4U; + uint32_t *r2 = b + 8U; + uint32_t *r3 = b + 12U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = iv0; + r2[1U] = iv1; + r2[2U] = iv2; + r2[3U] = iv3; + r3[0U] = iv4; + r3[1U] = iv5; + r3[2U] = iv6; + r3[3U] = iv7; + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)params.digest_length + ^ + ((uint32_t)params.key_length + << 8U + ^ ((uint32_t)params.fanout << 16U ^ (uint32_t)params.depth << 24U)); + tmp[1U] = params.leaf_length; + tmp[2U] = (uint32_t)params.node_offset; + tmp[3U] = + (uint32_t)(params.node_offset >> 32U) + ^ ((uint32_t)params.node_depth << 16U ^ (uint32_t)params.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = iv0_; + r0[1U] = iv1_; + r0[2U] = iv2_; + r0[3U] = iv3_; + r1[0U] = iv4_; + r1[1U] = iv5_; + r1[2U] = iv6_; + r1[3U] = iv7_; + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2s_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 16U, uint32_t, void *); + Lib_Memzero0_memzero(b, 16U, uint32_t, void *); +} + diff --git a/src/msvc/Hacl_Hash_Blake2s_Simd128.c b/src/msvc/Hacl_Hash_Blake2s_Simd128.c index ed86be43..c02da8fa 100644 --- a/src/msvc/Hacl_Hash_Blake2s_Simd128.c +++ b/src/msvc/Hacl_Hash_Blake2s_Simd128.c @@ -78,22 +78,22 @@ update_block( Lib_IntVector_Intrinsics_vec128 *r1 = m_st + 1U; Lib_IntVector_Intrinsics_vec128 *r20 = m_st + 2U; Lib_IntVector_Intrinsics_vec128 *r30 = m_st + 3U; - uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U]; - uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U]; - uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U]; - uint32_t s3 = 
Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U]; - uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U]; - uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U]; - uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U]; - uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U]; - uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U]; - uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U]; - uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U]; - uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U]; - uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U]; - uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U]; - uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U]; - uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U]; + uint32_t s0 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 0U]; + uint32_t s1 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 1U]; + uint32_t s2 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 2U]; + uint32_t s3 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 3U]; + uint32_t s4 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 4U]; + uint32_t s5 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 5U]; + uint32_t s6 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 6U]; + uint32_t s7 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 7U]; + uint32_t s8 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 8U]; + uint32_t s9 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 9U]; + uint32_t s10 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 10U]; + uint32_t s11 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 11U]; + uint32_t s12 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 12U]; + uint32_t s13 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 13U]; + uint32_t s14 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 14U]; + uint32_t s15 = Hacl_Hash_Blake2b_sigmaTable[start_idx + 15U]; r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); @@ -215,29 +215,95 @@ update_block( void Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn) { - uint32_t tmp[8U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 *r0 = hash; - Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; - Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; - Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; - uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U]; - uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U]; - uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U]; - uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U]; - uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U]; - uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U]; - uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U]; - uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U]; - r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); uint8_t salt[8U] = { 0U }; uint8_t personal[8U] = { 0U }; - Hacl_Hash_Blake2s_blake2_params + Hacl_Hash_Blake2b_blake2_params p = { .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U, .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, .personal = personal }; + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = hash; + Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; + uint32_t iv0 = 
Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = p.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = p.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)(uint8_t)nn + ^ ((uint32_t)(uint8_t)kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[1U] = p.leaf_length; + tmp[2U] = (uint32_t)p.node_offset; + tmp[3U] = + (uint32_t)(p.node_offset >> 32U) + ^ ((uint32_t)p.node_depth << 16U ^ (uint32_t)p.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); +} + +static void +init_with_params(Lib_IntVector_Intrinsics_vec128 *hash, Hacl_Hash_Blake2b_blake2_params p) +{ + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = hash; + Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); KRML_MAYBE_FOR2(i, 0U, 2U, @@ -258,7 +324,9 @@ Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t k uint32_t r = u; uint32_t x = r; os[i] = x;); - tmp[0U] = nn ^ (kk << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); + tmp[0U] = + (uint32_t)p.digest_length + ^ ((uint32_t)p.key_length << 8U ^ ((uint32_t)p.fanout << 16U ^ (uint32_t)p.depth << 24U)); tmp[1U] = p.leaf_length; tmp[2U] = (uint32_t)p.node_offset; tmp[3U] = @@ -514,10 +582,11 @@ Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void) return buf; } -/** - State allocation function when there is no key -*/ -Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) +static Hacl_Hash_Blake2s_Simd128_state_t +*malloc_raw( + Hacl_Hash_Blake2b_index kk, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { 
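+  /* Two-phase construction: allocate the 64-byte block buffer and the two
+     4-vector rows first, then (below) stash the key into the buffer and
+     initialize the hash rows from the caller's parameter block. */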
uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
   Lib_IntVector_Intrinsics_vec128
@@ -530,33 +599,199 @@ Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void)
     (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
       sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
   memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2s_Simd128_block_state_t
+  block_state = { .fst = kk.key_length, .snd = kk.digest_length, .thd = { .fst = wv, .snd = b } };
+  uint8_t kk10 = kk.key_length;
+  uint32_t ite;
+  if (kk10 != 0U)
+  {
+    ite = 64U;
+  }
+  else
+  {
+    ite = 0U;
+  }
   Hacl_Hash_Blake2s_Simd128_state_t
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite };
   Hacl_Hash_Blake2s_Simd128_state_t
   *p =
     (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof (
         Hacl_Hash_Blake2s_Simd128_state_t
       ));
   p[0U] = s;
-  Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U);
+  Hacl_Hash_Blake2b_blake2_params *p1 = key.fst;
+  uint8_t kk1 = p1->key_length;
+  uint8_t nn = p1->digest_length;
+  Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn };
+  uint32_t kk2 = (uint32_t)i.key_length;
+  uint8_t *k_1 = key.snd;
+  if (!(kk2 == 0U))
+  {
+    uint8_t *sub_b = buf + kk2;
+    memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t));
+    memcpy(buf, k_1, kk2 * sizeof (uint8_t));
+  }
+  Hacl_Hash_Blake2b_blake2_params pv = p1[0U];
+  init_with_params(block_state.thd.snd, pv);
   return p;
 }
 
 /**
-  Re-initialization function when there is no key
+  State allocation function when there are parameters and a key. The
+length of the key k MUST match the value of the field key_length in the
+parameters. Furthermore, there is a static (not dynamically checked) requirement
+that key_length does not exceed max_key (32 for S, 64 for B).
 */
-void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state)
+Hacl_Hash_Blake2s_Simd128_state_t
+*Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(
+  Hacl_Hash_Blake2b_blake2_params *p,
+  uint8_t *k
+)
+{
+  Hacl_Hash_Blake2b_blake2_params pv = p[0U];
+  Hacl_Hash_Blake2b_index
+  i1 = { .key_length = pv.key_length, .digest_length = pv.digest_length };
+  return
+    malloc_raw(i1,
+      ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k }));
+}
+
+/**
+  State allocation function when there is just a custom key. All
+other parameters are set to their respective default values, meaning the output
+length is the maximum allowed output (32 for S, 64 for B).
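+
+When a salt or personalization string is needed, allocate through
+Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key instead (illustrative;
+salt8 and personal8 are caller-provided 8-byte arrays):
+
+  Hacl_Hash_Blake2b_blake2_params p =
+    { .digest_length = 32U, .key_length = 0U, .fanout = 1U, .depth = 1U,
+      .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U,
+      .inner_length = 0U, .salt = salt8, .personal = personal8 };
+  Hacl_Hash_Blake2s_Simd128_state_t
+  *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(&p, NULL);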
+*/ +Hacl_Hash_Blake2s_Simd128_state_t +*Hacl_Hash_Blake2s_Simd128_malloc_with_key0(uint8_t *k, uint8_t kk) +{ + uint8_t nn = 32U; + Hacl_Hash_Blake2b_index i = { .key_length = kk, .digest_length = nn }; + uint8_t *salt = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + uint8_t *personal = (uint8_t *)KRML_HOST_CALLOC(8U, sizeof (uint8_t)); + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = i.digest_length, .key_length = i.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params + *p0 = + (Hacl_Hash_Blake2b_blake2_params *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_blake2_params)); + p0[0U] = p; + Hacl_Hash_Blake2s_Simd128_state_t + *s = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(p0, k); + Hacl_Hash_Blake2b_blake2_params p1 = p0[0U]; + KRML_HOST_FREE(p1.salt); + KRML_HOST_FREE(p1.personal); + KRML_HOST_FREE(p0); + return s; +} + +/** + State allocation function when there is no key +*/ +Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void) +{ + return Hacl_Hash_Blake2s_Simd128_malloc_with_key0(NULL, 0U); +} + +static Hacl_Hash_Blake2b_index index_of_state(Hacl_Hash_Blake2s_Simd128_state_t *s) +{ + Hacl_Hash_Blake2s_Simd128_block_state_t block_state = (*s).block_state; + uint8_t nn = block_state.snd; + uint8_t kk1 = block_state.fst; + return ((Hacl_Hash_Blake2b_index){ .key_length = kk1, .digest_length = nn }); +} + +static void +reset_raw( + Hacl_Hash_Blake2s_Simd128_state_t *state, + K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_ key +) { Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U); + uint8_t nn0 = block_state.snd; + uint8_t kk10 = block_state.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk10, .digest_length = nn0 }; + KRML_MAYBE_UNUSED_VAR(i); + Hacl_Hash_Blake2b_blake2_params *p = key.fst; + uint8_t kk1 = p->key_length; + uint8_t nn = p->digest_length; + Hacl_Hash_Blake2b_index i1 = { .key_length = kk1, .digest_length = nn }; + uint32_t kk2 = (uint32_t)i1.key_length; + uint8_t *k_1 = key.snd; + if (!(kk2 == 0U)) + { + uint8_t *sub_b = buf + kk2; + memset(sub_b, 0U, (64U - kk2) * sizeof (uint8_t)); + memcpy(buf, k_1, kk2 * sizeof (uint8_t)); + } + Hacl_Hash_Blake2b_blake2_params pv = p[0U]; + init_with_params(block_state.thd.snd, pv); + uint8_t kk11 = i.key_length; + uint32_t ite; + if (kk11 != 0U) + { + ite = 64U; + } + else + { + ite = 0U; + } Hacl_Hash_Blake2s_Simd128_state_t - tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U }; + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)ite }; state[0U] = tmp; } +/** + Re-initialization function. The reinitialization API is tricky -- +you MUST reuse the same original parameters for digest (output) length and key +length. +*/ +void +Hacl_Hash_Blake2s_Simd128_reset_with_key_and_params( + Hacl_Hash_Blake2s_Simd128_state_t *s, + Hacl_Hash_Blake2b_blake2_params *p, + uint8_t *k +) +{ + index_of_state(s); + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = p, .snd = k })); +} + +/** + Re-initialization function when there is a key. 
Note that the key +size is not allowed to change, which is why this function does not take a key +length -- the key has to be same key size that was originally passed to +`malloc_with_key` +*/ +void Hacl_Hash_Blake2s_Simd128_reset_with_key(Hacl_Hash_Blake2s_Simd128_state_t *s, uint8_t *k) +{ + Hacl_Hash_Blake2b_index idx = index_of_state(s); + uint8_t salt[8U] = { 0U }; + uint8_t personal[8U] = { 0U }; + Hacl_Hash_Blake2b_blake2_params + p = + { + .digest_length = idx.digest_length, .key_length = idx.key_length, .fanout = 1U, .depth = 1U, + .leaf_length = 0U, .node_offset = 0ULL, .node_depth = 0U, .inner_length = 0U, .salt = salt, + .personal = personal + }; + Hacl_Hash_Blake2b_blake2_params p0 = p; + reset_raw(s, ((K____Hacl_Impl_Blake2_Core_blake2_params___uint8_t_){ .fst = &p0, .snd = k })); +} + +/** + Re-initialization function when there is no key +*/ +void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *s) +{ + Hacl_Hash_Blake2s_Simd128_reset_with_key(s, NULL); +} + /** Update function when there is no key; 0 = success, 1 = max length exceeded */ @@ -628,8 +863,10 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -647,8 +884,9 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - data1_len; uint8_t *data1 = chunk; uint8_t *data2 = chunk + data1_len; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -709,8 +947,10 @@ Hacl_Hash_Blake2s_Simd128_update( if (!(sz1 == 0U)) { uint64_t prevlen = total_len1 - (uint64_t)sz1; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = 1U; Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb); } @@ -729,8 +969,9 @@ Hacl_Hash_Blake2s_Simd128_update( uint32_t data2_len = chunk_len - diff - data1_len; uint8_t *data1 = chunk2; uint8_t *data2 = chunk2 + data1_len; - Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst; - Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ acc = block_state1.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; uint32_t nb = data1_len / 64U; Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb); uint8_t *dst = buf; @@ -754,6 +995,10 @@ Hacl_Hash_Blake2s_Simd128_update( void Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output) { + 
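/* Hash the buffered remainder on a stack-allocated copy of the block state,
+     so the streaming state stays valid and digest can be called repeatedly. */
+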
Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = (*state).block_state; + uint8_t nn = block_state0.snd; + uint8_t kk1 = block_state0.fst; + Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn }; Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; uint8_t *buf_ = scrut.buf; @@ -770,9 +1015,11 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 uint8_t *buf_1 = buf_; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Hash_Blake2s_Simd128_block_state_t tmp_block_state = { .fst = wv0, .snd = b }; - Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd; - Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd; + Hacl_Hash_Blake2s_Simd128_block_state_t + tmp_block_state = + { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv0, .snd = b } }; + Lib_IntVector_Intrinsics_vec128 *src_b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.thd.snd; memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128)); uint64_t prev_len = total_len - (uint64_t)r; uint32_t ite; @@ -786,15 +1033,20 @@ Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8 } uint8_t *buf_last = buf_1 + r - ite; uint8_t *buf_multi = buf_1; - Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc0 = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec128 *wv1 = acc0.fst; + Lib_IntVector_Intrinsics_vec128 *hash0 = acc0.snd; uint32_t nb = 0U; Hacl_Hash_Blake2s_Simd128_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb); uint64_t prev_len_last = total_len - (uint64_t)r; - Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst; - Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd; + K____Lib_IntVector_Intrinsics_vec128___Lib_IntVector_Intrinsics_vec128_ + acc = tmp_block_state.thd; + Lib_IntVector_Intrinsics_vec128 *wv = acc.fst; + Lib_IntVector_Intrinsics_vec128 *hash = acc.snd; Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, prev_len_last, r, buf_last); - Hacl_Hash_Blake2s_Simd128_finish(32U, output, tmp_block_state.snd); + uint8_t nn0 = tmp_block_state.snd; + Hacl_Hash_Blake2s_Simd128_finish((uint32_t)nn0, output, tmp_block_state.thd.snd); } /** @@ -805,19 +1057,60 @@ void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state) Hacl_Hash_Blake2s_Simd128_state_t scrut = *state; uint8_t *buf = scrut.buf; Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst; - Lib_IntVector_Intrinsics_vec128 *b = block_state.snd; + Lib_IntVector_Intrinsics_vec128 *b = block_state.thd.snd; + Lib_IntVector_Intrinsics_vec128 *wv = block_state.thd.fst; KRML_ALIGNED_FREE(wv); KRML_ALIGNED_FREE(b); KRML_HOST_FREE(buf); KRML_HOST_FREE(state); } +/** + Copying. The key length (or absence thereof) must match between source and destination. 
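+  The copy is deep: the 64-byte block buffer and the vec128 state rows are
+  copied into fresh allocations (the working vector is reallocated and zeroed),
+  so the source and the copy can then be updated independently.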
+*/
+Hacl_Hash_Blake2s_Simd128_state_t
+*Hacl_Hash_Blake2s_Simd128_copy(Hacl_Hash_Blake2s_Simd128_state_t *state)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state0 = scrut.block_state;
+  uint8_t *buf0 = scrut.buf;
+  uint64_t total_len0 = scrut.total_len;
+  uint8_t nn = block_state0.snd;
+  uint8_t kk1 = block_state0.fst;
+  Hacl_Hash_Blake2b_index i = { .key_length = kk1, .digest_length = nn };
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec128
+  *wv =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128
+  *b =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Hacl_Hash_Blake2s_Simd128_block_state_t
+  block_state = { .fst = i.key_length, .snd = i.digest_length, .thd = { .fst = wv, .snd = b } };
+  Lib_IntVector_Intrinsics_vec128 *src_b = block_state0.thd.snd;
+  Lib_IntVector_Intrinsics_vec128 *dst_b = block_state.thd.snd;
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Hacl_Hash_Blake2s_Simd128_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
+  Hacl_Hash_Blake2s_Simd128_state_t
+  *p =
+    (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_Hash_Blake2s_Simd128_state_t
+      ));
+  p[0U] = s;
+  return p;
+}
+
 /**
 Write the BLAKE2s digest of message `input` using key `key` into `output`.

 @param output Pointer to `output_len` bytes of memory where the digest is written to.
 @param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
 @param input Pointer to `input_len` bytes of memory where the input message is read from.
 @param input_len Length of the input message.
 @param key Pointer to `key_len` bytes of memory where the key is read from.
@@ -842,3 +1135,84 @@ Hacl_Hash_Blake2s_Simd128_hash_with_key( Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); } +void +Hacl_Hash_Blake2s_Simd128_hash_with_key_and_paramas( + uint8_t *output, + uint8_t *input, + uint32_t input_len, + Hacl_Hash_Blake2b_blake2_params params, + uint8_t *key +) +{ + KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; + KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U }; + uint32_t tmp[8U] = { 0U }; + Lib_IntVector_Intrinsics_vec128 *r0 = b; + Lib_IntVector_Intrinsics_vec128 *r1 = b + 1U; + Lib_IntVector_Intrinsics_vec128 *r2 = b + 2U; + Lib_IntVector_Intrinsics_vec128 *r3 = b + 3U; + uint32_t iv0 = Hacl_Hash_Blake2b_ivTable_S[0U]; + uint32_t iv1 = Hacl_Hash_Blake2b_ivTable_S[1U]; + uint32_t iv2 = Hacl_Hash_Blake2b_ivTable_S[2U]; + uint32_t iv3 = Hacl_Hash_Blake2b_ivTable_S[3U]; + uint32_t iv4 = Hacl_Hash_Blake2b_ivTable_S[4U]; + uint32_t iv5 = Hacl_Hash_Blake2b_ivTable_S[5U]; + uint32_t iv6 = Hacl_Hash_Blake2b_ivTable_S[6U]; + uint32_t iv7 = Hacl_Hash_Blake2b_ivTable_S[7U]; + r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); + r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 4U; + uint8_t *bj = params.salt + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + KRML_MAYBE_FOR2(i, + 0U, + 2U, + 1U, + uint32_t *os = tmp + 6U; + uint8_t *bj = params.personal + i * 4U; + uint32_t u = load32_le(bj); + uint32_t r = u; + uint32_t x = r; + os[i] = x;); + tmp[0U] = + (uint32_t)params.digest_length + ^ + ((uint32_t)params.key_length + << 8U + ^ ((uint32_t)params.fanout << 16U ^ (uint32_t)params.depth << 24U)); + tmp[1U] = params.leaf_length; + tmp[2U] = (uint32_t)params.node_offset; + tmp[3U] = + (uint32_t)(params.node_offset >> 32U) + ^ ((uint32_t)params.node_depth << 16U ^ (uint32_t)params.inner_length << 24U); + uint32_t tmp0 = tmp[0U]; + uint32_t tmp1 = tmp[1U]; + uint32_t tmp2 = tmp[2U]; + uint32_t tmp3 = tmp[3U]; + uint32_t tmp4 = tmp[4U]; + uint32_t tmp5 = tmp[5U]; + uint32_t tmp6 = tmp[6U]; + uint32_t tmp7 = tmp[7U]; + uint32_t iv0_ = iv0 ^ tmp0; + uint32_t iv1_ = iv1 ^ tmp1; + uint32_t iv2_ = iv2 ^ tmp2; + uint32_t iv3_ = iv3 ^ tmp3; + uint32_t iv4_ = iv4 ^ tmp4; + uint32_t iv5_ = iv5 ^ tmp5; + uint32_t iv6_ = iv6 ^ tmp6; + uint32_t iv7_ = iv7 ^ tmp7; + r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1_, iv2_, iv3_); + r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4_, iv5_, iv6_, iv7_); + update(b1, b, (uint32_t)params.key_length, key, input_len, input); + Hacl_Hash_Blake2s_Simd128_finish((uint32_t)params.digest_length, output, b); + Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128, void *); + Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128, void *); +} + diff --git a/src/msvc/Hacl_Hash_SHA3.c b/src/msvc/Hacl_Hash_SHA3.c index b8551af3..89bb0491 100644 --- a/src/msvc/Hacl_Hash_SHA3.c +++ b/src/msvc/Hacl_Hash_SHA3.c @@ -53,6 +53,123 @@ Hacl_Hash_SHA3_keccak_rndc[24U] = 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL }; +static void absorb_inner_32(uint8_t *b, uint64_t *s) +{ + uint64_t ws[32U] = { 0U }; + uint8_t *b1 = b; + uint64_t u = load64_le(b1); + ws[0U] = u; + uint64_t u0 = load64_le(b1 + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b1 + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b1 + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b1 + 32U); 
+ ws[4U] = u3; + uint64_t u4 = load64_le(b1 + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b1 + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b1 + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b1 + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b1 + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b1 + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b1 + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b1 + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b1 + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b1 + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b1 + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b1 + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b1 + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b1 + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b1 + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b1 + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b1 + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b1 + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b1 + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b1 + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b1 + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b1 + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b1 + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b1 + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b1 + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b1 + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b1 + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } +} + static uint32_t block_len(Spec_Hash_Definitions_hash_alg a) { switch (a) @@ -126,16 +243,55 @@ Hacl_Hash_SHA3_update_multi_sha3( ) { uint32_t l = block_len(a) * n_blocks; - for (uint32_t i0 = 0U; i0 < l / block_len(a); i0++) + for (uint32_t i = 0U; i < l / block_len(a); i++) { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; uint8_t *b0 = blocks; uint8_t *bl0 = b_; - uint8_t *uu____0 = b0 + i0 * block_len(a); + uint8_t *uu____0 = b0 + i * block_len(a); memcpy(bl0, uu____0, block_len(a) * sizeof (uint8_t)); + block_len(a); + absorb_inner_32(b_, s); + } +} + +void +Hacl_Hash_SHA3_update_last_sha3( + 
Spec_Hash_Definitions_hash_alg a, + uint64_t *s, + uint8_t *input, + uint32_t input_len +) +{ + uint8_t suffix; + if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256) + { + suffix = 0x1fU; + } + else + { + suffix = 0x06U; + } + uint32_t len = block_len(a); + if (input_len == len) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b00 = input; + uint8_t *bl00 = b_; + memcpy(bl00, b00 + 0U * len, len * sizeof (uint8_t)); + absorb_inner_32(b_, s); + uint8_t b2[256U] = { 0U }; + uint8_t *b_0 = b2; + uint32_t rem = 0U % len; + uint8_t *b01 = input + input_len; + uint8_t *bl0 = b_0; + memcpy(bl0, b01 + 0U - rem, rem * sizeof (uint8_t)); + uint8_t *b02 = b_0; + b02[0U % len] = suffix; uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; + uint8_t *b = b_0; uint64_t u = load64_le(b); ws[0U] = u; uint64_t u0 = load64_le(b + 8U); @@ -204,268 +360,6 @@ Hacl_Hash_SHA3_update_multi_sha3( { s[i] = s[i] ^ ws[i]; } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____1 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____1 << 1U | uu____1 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____2 = current; - s[_Y] = uu____2 << r | uu____2 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } -} - -void -Hacl_Hash_SHA3_update_last_sha3( - Spec_Hash_Definitions_hash_alg a, - uint64_t *s, - uint8_t *input, - uint32_t input_len -) -{ - uint8_t suffix; - if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256) - { - suffix = 0x1fU; - } - else - { - suffix = 0x06U; - } - uint32_t len = block_len(a); - if (input_len == len) - { - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; - uint8_t *b00 = input; - uint8_t *bl00 = b_; - memcpy(bl00, b00 + 0U * len, len * sizeof (uint8_t)); - uint64_t ws[32U] = { 0U }; - uint8_t *b3 = b_; - uint64_t u0 = load64_le(b3); - ws[0U] = u0; - uint64_t u1 = load64_le(b3 + 8U); - ws[1U] = u1; - uint64_t u2 = load64_le(b3 + 16U); - ws[2U] = u2; - uint64_t u3 = load64_le(b3 + 24U); - ws[3U] = u3; - uint64_t u4 = load64_le(b3 + 32U); - ws[4U] = u4; - uint64_t u5 = load64_le(b3 + 40U); - ws[5U] = u5; - uint64_t u6 = load64_le(b3 + 48U); - ws[6U] = u6; - uint64_t u7 = load64_le(b3 + 56U); - ws[7U] = u7; - uint64_t u8 = load64_le(b3 + 64U); - ws[8U] = u8; - uint64_t u9 = load64_le(b3 + 72U); - ws[9U] = u9; - uint64_t u10 = load64_le(b3 + 80U); - ws[10U] = u10; - uint64_t u11 = load64_le(b3 + 88U); - ws[11U] = u11; - uint64_t u12 = load64_le(b3 + 96U); - ws[12U] = u12; - uint64_t u13 = load64_le(b3 + 104U); - ws[13U] = u13; - uint64_t u14 
= load64_le(b3 + 112U); - ws[14U] = u14; - uint64_t u15 = load64_le(b3 + 120U); - ws[15U] = u15; - uint64_t u16 = load64_le(b3 + 128U); - ws[16U] = u16; - uint64_t u17 = load64_le(b3 + 136U); - ws[17U] = u17; - uint64_t u18 = load64_le(b3 + 144U); - ws[18U] = u18; - uint64_t u19 = load64_le(b3 + 152U); - ws[19U] = u19; - uint64_t u20 = load64_le(b3 + 160U); - ws[20U] = u20; - uint64_t u21 = load64_le(b3 + 168U); - ws[21U] = u21; - uint64_t u22 = load64_le(b3 + 176U); - ws[22U] = u22; - uint64_t u23 = load64_le(b3 + 184U); - ws[23U] = u23; - uint64_t u24 = load64_le(b3 + 192U); - ws[24U] = u24; - uint64_t u25 = load64_le(b3 + 200U); - ws[25U] = u25; - uint64_t u26 = load64_le(b3 + 208U); - ws[26U] = u26; - uint64_t u27 = load64_le(b3 + 216U); - ws[27U] = u27; - uint64_t u28 = load64_le(b3 + 224U); - ws[28U] = u28; - uint64_t u29 = load64_le(b3 + 232U); - ws[29U] = u29; - uint64_t u30 = load64_le(b3 + 240U); - ws[30U] = u30; - uint64_t u31 = load64_le(b3 + 248U); - ws[31U] = u31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____0 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____1 = current; - s[_Y] = uu____1 << r | uu____1 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } - uint8_t b4[256U] = { 0U }; - uint8_t *b_0 = b4; - uint32_t rem = 0U % len; - uint8_t *b01 = input + input_len; - uint8_t *bl0 = b_0; - memcpy(bl0, b01 + 0U - rem, rem * sizeof (uint8_t)); - uint8_t *b02 = b_0; - b02[0U % len] = suffix; - uint64_t ws0[32U] = { 0U }; - uint8_t *b = b_0; - uint64_t u32 = load64_le(b); - ws0[0U] = u32; - uint64_t u33 = load64_le(b + 8U); - ws0[1U] = u33; - uint64_t u34 = load64_le(b + 16U); - ws0[2U] = u34; - uint64_t u35 = load64_le(b + 24U); - ws0[3U] = u35; - uint64_t u36 = load64_le(b + 32U); - ws0[4U] = u36; - uint64_t u37 = load64_le(b + 40U); - ws0[5U] = u37; - uint64_t u38 = load64_le(b + 48U); - ws0[6U] = u38; - uint64_t u39 = load64_le(b + 56U); - ws0[7U] = u39; - uint64_t u40 = load64_le(b + 64U); - ws0[8U] = u40; - uint64_t u41 = load64_le(b + 72U); - ws0[9U] = u41; - uint64_t u42 = load64_le(b + 80U); - ws0[10U] = u42; - uint64_t u43 = load64_le(b + 88U); - ws0[11U] = u43; - uint64_t u44 = load64_le(b + 96U); - ws0[12U] = u44; - uint64_t u45 = load64_le(b + 104U); - ws0[13U] = u45; - uint64_t u46 = load64_le(b + 112U); - ws0[14U] = u46; - uint64_t u47 = load64_le(b + 120U); - ws0[15U] = u47; - uint64_t u48 = load64_le(b + 128U); - 
ws0[16U] = u48; - uint64_t u49 = load64_le(b + 136U); - ws0[17U] = u49; - uint64_t u50 = load64_le(b + 144U); - ws0[18U] = u50; - uint64_t u51 = load64_le(b + 152U); - ws0[19U] = u51; - uint64_t u52 = load64_le(b + 160U); - ws0[20U] = u52; - uint64_t u53 = load64_le(b + 168U); - ws0[21U] = u53; - uint64_t u54 = load64_le(b + 176U); - ws0[22U] = u54; - uint64_t u55 = load64_le(b + 184U); - ws0[23U] = u55; - uint64_t u56 = load64_le(b + 192U); - ws0[24U] = u56; - uint64_t u57 = load64_le(b + 200U); - ws0[25U] = u57; - uint64_t u58 = load64_le(b + 208U); - ws0[26U] = u58; - uint64_t u59 = load64_le(b + 216U); - ws0[27U] = u59; - uint64_t u60 = load64_le(b + 224U); - ws0[28U] = u60; - uint64_t u61 = load64_le(b + 232U); - ws0[29U] = u61; - uint64_t u62 = load64_le(b + 240U); - ws0[30U] = u62; - uint64_t u63 = load64_le(b + 248U); - ws0[31U] = u63; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws0[i]; - } if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U % len == len - 1U) { for (uint32_t i0 = 0U; i0 < 24U; i0++) @@ -480,8 +374,8 @@ Hacl_Hash_SHA3_update_last_sha3( 0U, 5U, 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; @@ -490,8 +384,8 @@ Hacl_Hash_SHA3_update_last_sha3( uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -512,127 +406,15 @@ Hacl_Hash_SHA3_update_last_sha3( s[0U] = s[0U] ^ c; } } - uint8_t b5[256U] = { 0U }; - uint8_t *b6 = b5; - uint8_t *b0 = b6; + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; b0[len - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b6; - uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u64 = load64_le(b1 + 8U); - ws1[1U] = u64; - uint64_t u65 = load64_le(b1 + 16U); - ws1[2U] = u65; - uint64_t u66 = load64_le(b1 + 24U); - ws1[3U] = u66; - uint64_t u67 = load64_le(b1 + 32U); - ws1[4U] = u67; - uint64_t u68 = load64_le(b1 + 40U); - ws1[5U] = u68; - uint64_t u69 = load64_le(b1 + 48U); - ws1[6U] = u69; - uint64_t u70 = load64_le(b1 + 56U); - ws1[7U] = u70; - uint64_t u71 = load64_le(b1 + 64U); - ws1[8U] = u71; - uint64_t u72 = load64_le(b1 + 72U); - ws1[9U] = u72; - uint64_t u73 = load64_le(b1 + 80U); - ws1[10U] = u73; - uint64_t u74 = load64_le(b1 + 88U); - ws1[11U] = u74; - uint64_t u75 = load64_le(b1 + 96U); - ws1[12U] = u75; - uint64_t u76 = load64_le(b1 + 104U); - ws1[13U] = u76; - uint64_t u77 = load64_le(b1 + 112U); - ws1[14U] = u77; - uint64_t u78 = load64_le(b1 + 120U); - ws1[15U] = u78; - uint64_t u79 = load64_le(b1 + 128U); - ws1[16U] = u79; - uint64_t u80 = load64_le(b1 + 136U); - ws1[17U] = u80; - uint64_t u81 = load64_le(b1 + 144U); - ws1[18U] = u81; - uint64_t u82 = load64_le(b1 + 152U); - ws1[19U] = u82; - uint64_t u83 = load64_le(b1 + 160U); - ws1[20U] = u83; - uint64_t u84 = load64_le(b1 + 168U); - ws1[21U] = u84; - uint64_t u85 = load64_le(b1 + 176U); - ws1[22U] = u85; - uint64_t u86 = load64_le(b1 + 184U); - ws1[23U] = u86; - uint64_t u87 = load64_le(b1 + 192U); - ws1[24U] = u87; - uint64_t u88 = load64_le(b1 + 200U); - ws1[25U] = u88; - uint64_t u89 = 
load64_le(b1 + 208U); - ws1[26U] = u89; - uint64_t u90 = load64_le(b1 + 216U); - ws1[27U] = u90; - uint64_t u91 = load64_le(b1 + 224U); - ws1[28U] = u91; - uint64_t u92 = load64_le(b1 + 232U); - ws1[29U] = u92; - uint64_t u93 = load64_le(b1 + 240U); - ws1[30U] = u93; - uint64_t u94 = load64_le(b1 + 248U); - ws1[31U] = u94; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws1[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____4 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } + absorb_inner_32(b4, s); return; } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; uint32_t rem = input_len % len; uint8_t *b00 = input; uint8_t *bl0 = b_; @@ -641,70 +423,70 @@ Hacl_Hash_SHA3_update_last_sha3( b01[input_len % len] = suffix; uint64_t ws[32U] = { 0U }; uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws[27U] = u27; - uint64_t u28 = load64_le(b + 
224U); - ws[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws[31U] = u31; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { s[i] = s[i] ^ ws[i]; @@ -723,8 +505,8 @@ Hacl_Hash_SHA3_update_last_sha3( 0U, 5U, 1U, - uint64_t uu____6 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____6 << 1U | uu____6 >> 63U); + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; @@ -733,8 +515,8 @@ Hacl_Hash_SHA3_update_last_sha3( uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____7 = current; - s[_Y] = uu____7 << r | uu____7 >> (64U - r); + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -755,123 +537,11 @@ Hacl_Hash_SHA3_update_last_sha3( s[0U] = s[0U] ^ c; } } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; b0[len - 1U] = 0x80U; - uint64_t ws0[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws0[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws0[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws0[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws0[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws0[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws0[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws0[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws0[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws0[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws0[9U] = u40; - uint64_t 
u41 = load64_le(b1 + 80U); - ws0[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws0[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws0[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws0[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws0[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws0[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws0[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws0[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws0[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws0[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws0[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws0[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws0[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws0[23U] = u54; - uint64_t u55 = load64_le(b1 + 192U); - ws0[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws0[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws0[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws0[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws0[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws0[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws0[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws0[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws0[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____8 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____8 << 1U | uu____8 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____9 = current; - s[_Y] = uu____9 << r | uu____9 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } + absorb_inner_32(b3, s); } typedef struct hash_buf2_s @@ -1361,290 +1031,78 @@ bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s) return uu____0 == Spec_Hash_Definitions_Shake128 || uu____0 == Spec_Hash_Definitions_Shake256; } -void -Hacl_Hash_SHA3_shake128( - uint8_t *output, - uint32_t outputByteLen, - uint8_t *input, - uint32_t inputByteLen -) +void Hacl_Hash_SHA3_absorb_inner_32(uint32_t rateInBytes, uint8_t *b, uint64_t *s) { - uint8_t *ib = input; - uint8_t *rb = output; - uint64_t s[25U] = { 0U }; - uint32_t rateInBytes1 = 168U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) - { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; - uint8_t *b0 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b 
+ 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 = load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = u22; - uint64_t u23 = load64_le(b + 192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws[i]; - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____0 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____1 = current; - s[_Y] = uu____1 << r | uu____1 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; - uint32_t rem = inputByteLen % rateInBytes1; - uint8_t *b00 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); - uint8_t *b01 = b_; - b01[inputByteLen % rateInBytes1] = 0x1FU; - uint64_t ws0[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws0[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws0[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws0[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws0[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws0[4U] = u4; - uint64_t u5 = load64_le(b + 
40U); - ws0[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws0[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws0[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws0[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws0[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws0[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws0[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws0[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws0[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws0[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws0[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws0[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws0[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws0[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws0[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws0[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws0[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws0[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws0[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws0[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws0[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws0[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws0[27U] = u27; - uint64_t u28 = load64_le(b + 224U); - ws0[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws0[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws0[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws0[31U] = u31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws0[i]; - } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; - b0[rateInBytes1 - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b4; + KRML_MAYBE_UNUSED_VAR(rateInBytes); + uint64_t ws[32U] = { 0U }; + uint8_t *b1 = b; uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws1[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws1[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws1[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws1[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws1[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws1[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws1[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws1[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws1[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws1[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws1[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws1[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws1[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws1[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws1[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws1[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws1[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws1[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws1[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws1[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws1[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws1[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws1[23U] = u54; - uint64_t u55 = load64_le(b1 + 192U); - ws1[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws1[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws1[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws1[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws1[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws1[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws1[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws1[31U] = u62; 
+ ws[0U] = u; + uint64_t u0 = load64_le(b1 + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b1 + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b1 + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b1 + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b1 + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b1 + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b1 + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b1 + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b1 + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b1 + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b1 + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b1 + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b1 + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b1 + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b1 + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b1 + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b1 + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b1 + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b1 + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b1 + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b1 + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b1 + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b1 + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b1 + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b1 + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b1 + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b1 + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b1 + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b1 + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b1 + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b1 + 248U); + ws[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { - s[i] = s[i] ^ ws1[i]; + s[i] = s[i] ^ ws[i]; } for (uint32_t i0 = 0U; i0 < 24U; i0++) { @@ -1658,8 +1116,8 @@ Hacl_Hash_SHA3_shake128( 0U, 5U, 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; @@ -1668,8 +1126,8 @@ Hacl_Hash_SHA3_shake128( uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -1689,6 +1147,112 @@ Hacl_Hash_SHA3_shake128( uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; s[0U] = s[0U] ^ c; } +} + +void +Hacl_Hash_SHA3_shake128( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 168U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = 
load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) { uint8_t hbuf[256U] = { 0U }; @@ -1712,8 +1276,8 @@ Hacl_Hash_SHA3_shake128( 0U, 5U, 1U, - uint64_t uu____4 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; @@ -1722,8 +1286,8 @@ Hacl_Hash_SHA3_shake128( uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -1767,83 +1331,109 @@ Hacl_Hash_SHA3_shake256( uint8_t *rb = output; uint64_t s[25U] = { 0U }; uint32_t rateInBytes1 = 136U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; uint8_t *b0 = ib; uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen 
- rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b + 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 = load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = u22; - uint64_t u23 = load64_le(b + 
192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) { - s[i] = s[i] ^ ws[i]; + store64_le(hbuf + i * 8U, ws[i]); } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); for (uint32_t i1 = 0U; i1 < 24U; i1++) { uint64_t _C[5U] = { 0U }; @@ -1888,202 +1478,116 @@ Hacl_Hash_SHA3_shake256( s[0U] = s[0U] ^ c; } } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 144U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b00 = ib; uint8_t *bl0 = b_; memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); uint8_t *b01 = b_; - b01[inputByteLen % rateInBytes1] = 0x1FU; + b01[inputByteLen % rateInBytes1] = 0x06U; uint64_t ws0[32U] = { 0U }; uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws0[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws0[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws0[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws0[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws0[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws0[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws0[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws0[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws0[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws0[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws0[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws0[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws0[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws0[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws0[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws0[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws0[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws0[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws0[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws0[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws0[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws0[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws0[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws0[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws0[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws0[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws0[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws0[27U] = u27; - uint64_t u28 = 
load64_le(b + 224U); - ws0[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws0[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws0[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws0[31U] = u31; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { s[i] = s[i] ^ ws0[i]; } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; b0[rateInBytes1 - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws1[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws1[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws1[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws1[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws1[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws1[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws1[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws1[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws1[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws1[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws1[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws1[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws1[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws1[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws1[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws1[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws1[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws1[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws1[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws1[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws1[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws1[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws1[23U] = 
u54; - uint64_t u55 = load64_le(b1 + 192U); - ws1[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws1[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws1[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws1[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws1[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws1[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws1[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws1[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws1[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } - for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) { uint8_t hbuf[256U] = { 0U }; uint64_t ws[32U] = { 0U }; @@ -2106,8 +1610,8 @@ Hacl_Hash_SHA3_shake256( 0U, 5U, 1U, - uint64_t uu____4 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; @@ -2116,8 +1620,8 @@ Hacl_Hash_SHA3_shake256( uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -2138,7 +1642,7 @@ Hacl_Hash_SHA3_shake256( s[0U] = s[0U] ^ c; } } - uint32_t remOut = outputByteLen % rateInBytes1; + uint32_t remOut = 28U % rateInBytes1; uint8_t hbuf[256U] = { 0U }; uint64_t ws[32U] = { 0U }; memcpy(ws, s, 25U * sizeof (uint64_t)); @@ -2146,92 +1650,118 @@ Hacl_Hash_SHA3_shake256( { store64_le(hbuf + i * 8U, ws[i]); } - memcpy(rb + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(rb + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); } -void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen) { uint8_t *ib = input; uint8_t *rb = output; uint64_t s[25U] = { 0U }; - uint32_t 
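
For orientation: every regenerated one-shot hasher above and below keeps the same sponge shape, absorb full rate-sized blocks, pad, permute once more, squeeze. A minimal scalar sketch of that shape, with hypothetical helper names (keccak_f1600, le64 and xor_block are stand-ins, not the generated HACL* symbols):

#include <stdint.h>
#include <string.h>

extern void keccak_f1600(uint64_t s[25U]); /* stand-in for the 24-round permutation */

static uint64_t le64(const uint8_t *b) /* little-endian lane load, as load64_le above */
{
  uint64_t w = 0U;
  for (int i = 7; i >= 0; i--) w = (w << 8U) | (uint64_t)b[i];
  return w;
}

static void xor_block(uint64_t s[25U], const uint8_t *blk, uint32_t rate)
{
  for (uint32_t i = 0U; i < rate / 8U; i++) s[i] ^= le64(blk + i * 8U);
}

/* domain is 0x06 for the SHA3-* digests and 0x1F for SHAKE, matching the
   constants written into b01 in the bodies above and below. */
static void sponge(uint8_t *out, uint32_t outLen, const uint8_t *in, uint32_t inLen,
                   uint32_t rate, uint8_t domain)
{
  uint64_t s[25U] = { 0U };
  for (uint32_t i = 0U; i < inLen / rate; i++) /* absorb full blocks */
  {
    xor_block(s, in + i * rate, rate);
    keccak_f1600(s);
  }
  uint8_t last[200U] = { 0U }; /* pad10*1 plus domain separation */
  uint32_t rem = inLen % rate;
  memcpy(last, in + inLen - rem, rem);
  last[rem] ^= domain;
  last[rate - 1U] ^= 0x80U;
  xor_block(s, last, rate);
  keccak_f1600(s);
  while (outLen > 0U) /* squeeze */
  {
    uint32_t n = outLen < rate ? outLen : rate;
    for (uint32_t i = 0U; i < n; i++) out[i] = (uint8_t)(s[i / 8U] >> (8U * (i % 8U)));
    out = out + n;
    outLen = outLen - n;
    if (outLen > 0U) keccak_f1600(s);
  }
}
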
rateInBytes1 = 144U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + uint32_t rateInBytes1 = 136U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; uint8_t *b0 = ib; uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b + 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t 
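
The rateInBytes1 constants visible in these hunks are the standard Keccak rates: for the fixed-length digests, rate = 200 - 2 * digest length, and for the XOFs the rate follows from the 256-bit and 512-bit capacities. Collected from the code in this file (the struct itself is only illustrative):

#include <stdint.h>

/* rate = 200 - 2 * digest_len for the SHA3-* digests; matches rateInBytes1 above */
static const struct { const char *name; uint32_t digest_len; uint32_t rate; }
sha3_rates[6U] =
{
  { "SHA3-224", 28U, 144U },
  { "SHA3-256", 32U, 136U },
  { "SHA3-384", 48U, 104U },
  { "SHA3-512", 64U,  72U },
  { "SHAKE128",  0U, 168U }, /* capacity 256 bits; output length is caller-chosen */
  { "SHAKE256",  0U, 136U }, /* capacity 512 bits */
};
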
u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 = load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = u22; - uint64_t u23 = load64_le(b + 192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) { - s[i] = s[i] ^ ws[i]; + store64_le(hbuf + i * 8U, ws[i]); } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); for (uint32_t i1 = 0U; i1 < 24U; i1++) { uint64_t _C[5U] = { 0U }; @@ -2276,1033 +1806,7 @@ void Hacl_Hash_SHA3_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByte s[0U] = s[0U] ^ c; } } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; - uint32_t rem = inputByteLen % rateInBytes1; - uint8_t *b00 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); - uint8_t *b01 = b_; - b01[inputByteLen % rateInBytes1] = 0x06U; - uint64_t ws0[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws0[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws0[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws0[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws0[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws0[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws0[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws0[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws0[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws0[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws0[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws0[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws0[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws0[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws0[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws0[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws0[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws0[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws0[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws0[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws0[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws0[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws0[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws0[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws0[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws0[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws0[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws0[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws0[27U] = u27; - uint64_t u28 = load64_le(b + 224U); - ws0[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws0[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws0[30U] 
= u30; - uint64_t u31 = load64_le(b + 248U); - ws0[31U] = u31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws0[i]; - } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; - b0[rateInBytes1 - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws1[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws1[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws1[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws1[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws1[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws1[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws1[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws1[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws1[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws1[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws1[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws1[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws1[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws1[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws1[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws1[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws1[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws1[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws1[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws1[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws1[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws1[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws1[23U] = u54; - uint64_t u55 = load64_le(b1 + 192U); - ws1[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws1[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws1[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws1[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws1[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws1[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws1[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws1[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws1[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } - for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) - { - uint8_t hbuf[256U] = { 0U }; - uint64_t ws[32U] = 
{ 0U }; - memcpy(ws, s, 25U * sizeof (uint64_t)); - for (uint32_t i = 0U; i < 32U; i++) - { - store64_le(hbuf + i * 8U, ws[i]); - } - uint8_t *b02 = rb; - memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____4 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } - uint32_t remOut = 28U % rateInBytes1; - uint8_t hbuf[256U] = { 0U }; - uint64_t ws[32U] = { 0U }; - memcpy(ws, s, 25U * sizeof (uint64_t)); - for (uint32_t i = 0U; i < 32U; i++) - { - store64_le(hbuf + i * 8U, ws[i]); - } - memcpy(rb + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); -} - -void Hacl_Hash_SHA3_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen) -{ - uint8_t *ib = input; - uint8_t *rb = output; - uint64_t s[25U] = { 0U }; - uint32_t rateInBytes1 = 136U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) - { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; - uint8_t *b0 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b + 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 = load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = 
u22; - uint64_t u23 = load64_le(b + 192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws[i]; - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____0 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____1 = current; - s[_Y] = uu____1 << r | uu____1 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; - uint32_t rem = inputByteLen % rateInBytes1; - uint8_t *b00 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); - uint8_t *b01 = b_; - b01[inputByteLen % rateInBytes1] = 0x06U; - uint64_t ws0[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws0[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws0[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws0[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws0[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws0[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws0[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws0[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws0[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws0[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws0[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws0[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws0[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws0[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws0[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws0[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws0[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws0[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws0[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws0[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws0[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws0[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws0[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws0[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws0[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws0[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws0[25U] = u25; - uint64_t u26 = 
load64_le(b + 208U); - ws0[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws0[27U] = u27; - uint64_t u28 = load64_le(b + 224U); - ws0[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws0[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws0[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws0[31U] = u31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws0[i]; - } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; - b0[rateInBytes1 - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws1[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws1[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws1[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws1[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws1[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws1[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws1[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws1[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws1[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws1[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws1[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws1[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws1[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws1[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws1[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws1[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws1[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws1[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws1[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws1[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws1[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws1[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws1[23U] = u54; - uint64_t u55 = load64_le(b1 + 192U); - ws1[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws1[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws1[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws1[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws1[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws1[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws1[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws1[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws1[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; 
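
The long removed runs here are the in-lined Keccak-f[1600] round: theta (the _C/_D block), rho and pi (driven by the Hacl_Hash_SHA3_keccak_piln and keccak_rotc tables), chi (the v0..v4 block) and iota (keccak_rndc). A compact restatement of one round over the 5x5 lane state, using the same exported tables, to make the deleted structure easier to follow; this is a readability sketch, not the generated code:

#include <stdint.h>

extern const uint32_t Hacl_Hash_SHA3_keccak_piln[24U];
extern const uint32_t Hacl_Hash_SHA3_keccak_rotc[24U];
extern const uint64_t Hacl_Hash_SHA3_keccak_rndc[24U];

#define ROTL64(x, n) ((x) << (n) | (x) >> (64U - (n)))

/* One Keccak-f[1600] round, same four steps as the deleted inline code. */
static void keccak_round(uint64_t s[25U], uint32_t round)
{
  uint64_t C[5U];
  for (uint32_t i = 0U; i < 5U; i++)                         /* theta */
    C[i] = s[i] ^ s[i + 5U] ^ s[i + 10U] ^ s[i + 15U] ^ s[i + 20U];
  for (uint32_t i = 0U; i < 5U; i++)
  {
    uint64_t D = C[(i + 4U) % 5U] ^ ROTL64(C[(i + 1U) % 5U], 1U);
    for (uint32_t j = 0U; j < 25U; j += 5U) s[i + j] ^= D;
  }
  uint64_t current = s[1U];
  for (uint32_t i = 0U; i < 24U; i++)                        /* rho + pi */
  {
    uint32_t y = Hacl_Hash_SHA3_keccak_piln[i];
    uint64_t tmp = s[y];
    s[y] = ROTL64(current, Hacl_Hash_SHA3_keccak_rotc[i]);
    current = tmp;
  }
  for (uint32_t j = 0U; j < 25U; j += 5U)                    /* chi */
  {
    uint64_t r[5U];
    for (uint32_t i = 0U; i < 5U; i++)
      r[i] = s[j + i] ^ (~s[j + (i + 1U) % 5U] & s[j + (i + 2U) % 5U]);
    for (uint32_t i = 0U; i < 5U; i++) s[j + i] = r[i];
  }
  s[0U] ^= Hacl_Hash_SHA3_keccak_rndc[round];                /* iota */
}
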
- s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } - for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++) - { - uint8_t hbuf[256U] = { 0U }; - uint64_t ws[32U] = { 0U }; - memcpy(ws, s, 25U * sizeof (uint64_t)); - for (uint32_t i = 0U; i < 32U; i++) - { - store64_le(hbuf + i * 8U, ws[i]); - } - uint8_t *b02 = rb; - memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____4 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } - uint32_t remOut = 32U % rateInBytes1; - uint8_t hbuf[256U] = { 0U }; - uint64_t ws[32U] = { 0U }; - memcpy(ws, s, 25U * sizeof (uint64_t)); - for (uint32_t i = 0U; i < 32U; i++) - { - store64_le(hbuf + i * 8U, ws[i]); - } - memcpy(rb + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); -} - -void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen) -{ - uint8_t *ib = input; - uint8_t *rb = output; - uint64_t s[25U] = { 0U }; - uint32_t rateInBytes1 = 104U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) - { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; - uint8_t *b0 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b + 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 
= load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = u22; - uint64_t u23 = load64_le(b + 192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws[i]; - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____0 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____1 = current; - s[_Y] = uu____1 << r | uu____1 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; - uint32_t rem = inputByteLen % rateInBytes1; - uint8_t *b00 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); - uint8_t *b01 = b_; - b01[inputByteLen % rateInBytes1] = 0x06U; - uint64_t ws0[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws0[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws0[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws0[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws0[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws0[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws0[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws0[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws0[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws0[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws0[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws0[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws0[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws0[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws0[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws0[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws0[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws0[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws0[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws0[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws0[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws0[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - 
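
Just above, the final partial block is padded in two steps: the domain byte (0x06 for the SHA3-* digests; 0x1F in the SHAKE code further down) lands at inputByteLen % rateInBytes1, and the closing 0x80 bit of pad10*1 goes into a second, otherwise-zero block at rateInBytes1 - 1U. Both blocks are XORed into the state before the single permutation that follows, which is equivalent to absorbing one fully padded block. A sketch of that step, with hypothetical helpers (xor_into_state, keccak_f1600):

#include <stdint.h>
#include <string.h>

extern void xor_into_state(uint64_t s[25U], const uint8_t *block, uint32_t rate); /* stand-in */
extern void keccak_f1600(uint64_t s[25U]);                                        /* stand-in */

static void absorb_last(uint64_t s[25U], uint32_t rate,
                        const uint8_t *input, uint32_t inputByteLen, uint8_t domain)
{
  uint32_t rem = inputByteLen % rate;
  uint8_t last[200U] = { 0U };
  memcpy(last, input + inputByteLen - rem, rem * sizeof (uint8_t));
  last[rem] = domain;            /* 0x06 or 0x1F; when rem == rate - 1 this byte
                                    and the 0x80 below hit the same state byte,
                                    XORing to 0x86 / 0x9F as the spec requires */
  xor_into_state(s, last, rate); /* first XOR, no permutation yet */
  uint8_t fin[200U] = { 0U };
  fin[rate - 1U] = 0x80U;
  xor_into_state(s, fin, rate);  /* second XOR... */
  keccak_f1600(s);               /* ...then exactly one Keccak-f[1600] */
}
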
ws0[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws0[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws0[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws0[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws0[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws0[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws0[27U] = u27; - uint64_t u28 = load64_le(b + 224U); - ws0[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws0[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws0[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws0[31U] = u31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws0[i]; - } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; - b0[rateInBytes1 - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws1[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws1[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws1[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws1[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws1[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws1[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws1[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws1[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws1[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws1[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws1[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws1[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws1[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws1[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws1[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws1[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws1[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws1[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws1[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws1[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws1[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws1[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws1[23U] = u54; - uint64_t u55 = load64_le(b1 + 192U); - ws1[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws1[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws1[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws1[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws1[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws1[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws1[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws1[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws1[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * 
i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } - for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) - { - uint8_t hbuf[256U] = { 0U }; - uint64_t ws[32U] = { 0U }; - memcpy(ws, s, 25U * sizeof (uint64_t)); - for (uint32_t i = 0U; i < 32U; i++) - { - store64_le(hbuf + i * 8U, ws[i]); - } - uint8_t *b02 = rb; - memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____4 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - s[0U] = s[0U] ^ c; - } - } - uint32_t remOut = 48U % rateInBytes1; + uint32_t remOut = 32U % rateInBytes1; uint8_t hbuf[256U] = { 0U }; uint64_t ws[32U] = { 0U }; memcpy(ws, s, 25U * sizeof (uint64_t)); @@ -3310,92 +1814,118 @@ void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByte { store64_le(hbuf + i * 8U, ws[i]); } - memcpy(rb + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(rb + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); } -void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +void Hacl_Hash_SHA3_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen) { uint8_t *ib = input; uint8_t *rb = output; - uint64_t s[25U] = { 0U }; - uint32_t rateInBytes1 = 72U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) - { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; - uint8_t *b0 = ib; - uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b + 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t 
u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 = load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = u22; - uint64_t u23 = load64_le(b + 192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 104U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b00 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[inputByteLen % rateInBytes1] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 
240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; + b0[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) { - s[i] = s[i] ^ ws[i]; + store64_le(hbuf + i * 8U, ws[i]); } + uint8_t *b02 = rb; + memcpy(b02 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); for (uint32_t i1 = 0U; i1 < 24U; i1++) { uint64_t _C[5U] = { 0U }; @@ -3440,8 +1970,34 @@ void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByte s[0U] = s[0U] ^ c; } } - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; + uint32_t remOut = 48U % rateInBytes1; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(rb + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint8_t *ib = input; + uint8_t *rb = output; + uint64_t s[25U] = { 0U }; + uint32_t rateInBytes1 = 72U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; + uint8_t *b0 = ib; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b_, s); + } + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b00 = ib; uint8_t *bl0 = b_; @@ -3450,191 +2006,79 @@ void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByte b01[inputByteLen % rateInBytes1] = 0x06U; uint64_t ws0[32U] = { 0U }; uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws0[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws0[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws0[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws0[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws0[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws0[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws0[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws0[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws0[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws0[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws0[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws0[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws0[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws0[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws0[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws0[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws0[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws0[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws0[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws0[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws0[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws0[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws0[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws0[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws0[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws0[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws0[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws0[27U] = u27; - uint64_t u28 = load64_le(b + 
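
Note that the squeeze loops kept in these fixed-length digests can never iterate: every digest length is smaller than its rate (48 < 104 here, 64 < 72 for SHA3-512 below), so the quotient is zero, the whole digest is produced by the remOut tail, and only the SHAKE paths ever take the full-block branch. A quick self-check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
  static const uint32_t digest[4U] = { 28U, 32U, 48U, 64U };
  static const uint32_t rate[4U]   = { 144U, 136U, 104U, 72U };
  for (uint32_t i = 0U; i < 4U; i++)
  {
    assert(digest[i] / rate[i] == 0U);        /* zero full squeeze blocks    */
    assert(digest[i] % rate[i] == digest[i]); /* remOut is the digest length */
  }
  return 0;
}
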
224U); - ws0[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws0[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws0[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws0[31U] = u31; + uint64_t u = load64_le(b); + ws0[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws0[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws0[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws0[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws0[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws0[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws0[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws0[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws0[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws0[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws0[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws0[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws0[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws0[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws0[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws0[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws0[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws0[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws0[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws0[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws0[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws0[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws0[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws0[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws0[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws0[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws0[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws0[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws0[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws0[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws0[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws0[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { s[i] = s[i] ^ ws0[i]; } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; b0[rateInBytes1 - 1U] = 0x80U; - uint64_t ws1[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws1[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws1[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws1[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws1[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws1[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws1[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws1[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws1[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws1[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws1[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws1[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws1[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws1[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws1[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws1[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws1[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws1[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws1[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws1[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws1[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws1[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws1[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws1[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws1[23U] = u54; - 
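
The removed runs on either side (32 little-endian lane loads into ws/ws1, XOR of the first 25 lanes into s, then the 24-round loop) are exactly what each new Hacl_Hash_SHA3_absorb_inner_32 call replaces. Reconstructed from its call sites, the helper's contract looks like the following; the body is a sketch and keccak_f1600_24 is a stand-in name, not the generated code:

#include <stdint.h>

extern void keccak_f1600_24(uint64_t s[25U]); /* stand-in for the 24-round loop */

/* b points at a 256-byte staging buffer whose first rateInBytes bytes hold the
   block and whose remainder is zero, so XORing all 25 lanes is safe for every
   rate (the largest rate, 168 bytes, covers only 21 lanes). */
void absorb_inner_32_sketch(uint32_t rateInBytes, const uint8_t b[256U], uint64_t s[25U])
{
  (void)rateInBytes; /* only the caller's zero padding depends on it */
  for (uint32_t i = 0U; i < 25U; i++)
  {
    uint64_t w = 0U;
    for (int j = 7; j >= 0; j--) w = (w << 8U) | (uint64_t)b[i * 8U + j];
    s[i] = s[i] ^ w;
  }
  keccak_f1600_24(s);
}
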
uint64_t u55 = load64_le(b1 + 192U); - ws1[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws1[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws1[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws1[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws1[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws1[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws1[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws1[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = s[i] ^ ws1[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____2 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); - uint64_t x = s[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - uint64_t uu____3 = current; - s[_Y] = uu____3 << r | uu____3 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); - uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); - uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); - uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); - uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } + Hacl_Hash_SHA3_absorb_inner_32(rateInBytes1, b3, s); for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++) { uint8_t hbuf[256U] = { 0U }; @@ -3658,8 +2102,8 @@ void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByte 0U, 5U, 1U, - uint64_t uu____4 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); uint64_t x = s[1U]; uint64_t current = x; @@ -3668,8 +2112,8 @@ void Hacl_Hash_SHA3_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByte uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; uint64_t temp = s[_Y]; - uint64_t uu____5 = current; - s[_Y] = uu____5 << r | uu____5 >> (64U - r); + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); current = temp; } KRML_MAYBE_FOR5(i, @@ -3732,129 +2176,14 @@ Absorb number of input blocks and write the output state void Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t inputByteLen) { - for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + for (uint32_t i = 0U; i < inputByteLen / 168U; i++) { - uint8_t b1[256U] = { 0U }; - uint8_t *b_ = b1; + uint8_t b[256U] = { 0U }; + uint8_t *b_ = b; uint8_t *b0 = input; uint8_t *bl0 = b_; - memcpy(bl0, b0 + i0 * 168U, 168U * sizeof (uint8_t)); - uint64_t ws[32U] = { 0U }; - uint8_t *b = b_; - uint64_t u = load64_le(b); - ws[0U] = u; - uint64_t u0 = load64_le(b + 8U); - ws[1U] = u0; - uint64_t u1 = load64_le(b + 16U); - ws[2U] = u1; - uint64_t u2 = load64_le(b + 24U); - ws[3U] = u2; - 
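
With the helper factored out, shake128_absorb_nblocks above reduces to one absorb_inner_32 per full 168-byte block, and shake128_absorb_final in the hunks below pads and absorbs the tail. Both take the total input length: nblocks consumes the inputByteLen / 168U full blocks and absorb_final locates the last inputByteLen % 168U bytes itself, so a caller can chain them over the same buffer. A usage sketch (squeezing is done by separate functions outside this excerpt):

#include <stdint.h>

extern void
Hacl_Hash_SHA3_shake128_absorb_nblocks(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
extern void
Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t inputByteLen);

void shake128_absorb_all(uint64_t state[25U], uint8_t *input, uint32_t inputByteLen)
{
  Hacl_Hash_SHA3_shake128_absorb_nblocks(state, input, inputByteLen); /* full blocks      */
  Hacl_Hash_SHA3_shake128_absorb_final(state, input, inputByteLen);   /* 0x1F-padded tail */
}
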
uint64_t u3 = load64_le(b + 32U); - ws[4U] = u3; - uint64_t u4 = load64_le(b + 40U); - ws[5U] = u4; - uint64_t u5 = load64_le(b + 48U); - ws[6U] = u5; - uint64_t u6 = load64_le(b + 56U); - ws[7U] = u6; - uint64_t u7 = load64_le(b + 64U); - ws[8U] = u7; - uint64_t u8 = load64_le(b + 72U); - ws[9U] = u8; - uint64_t u9 = load64_le(b + 80U); - ws[10U] = u9; - uint64_t u10 = load64_le(b + 88U); - ws[11U] = u10; - uint64_t u11 = load64_le(b + 96U); - ws[12U] = u11; - uint64_t u12 = load64_le(b + 104U); - ws[13U] = u12; - uint64_t u13 = load64_le(b + 112U); - ws[14U] = u13; - uint64_t u14 = load64_le(b + 120U); - ws[15U] = u14; - uint64_t u15 = load64_le(b + 128U); - ws[16U] = u15; - uint64_t u16 = load64_le(b + 136U); - ws[17U] = u16; - uint64_t u17 = load64_le(b + 144U); - ws[18U] = u17; - uint64_t u18 = load64_le(b + 152U); - ws[19U] = u18; - uint64_t u19 = load64_le(b + 160U); - ws[20U] = u19; - uint64_t u20 = load64_le(b + 168U); - ws[21U] = u20; - uint64_t u21 = load64_le(b + 176U); - ws[22U] = u21; - uint64_t u22 = load64_le(b + 184U); - ws[23U] = u22; - uint64_t u23 = load64_le(b + 192U); - ws[24U] = u23; - uint64_t u24 = load64_le(b + 200U); - ws[25U] = u24; - uint64_t u25 = load64_le(b + 208U); - ws[26U] = u25; - uint64_t u26 = load64_le(b + 216U); - ws[27U] = u26; - uint64_t u27 = load64_le(b + 224U); - ws[28U] = u27; - uint64_t u28 = load64_le(b + 232U); - ws[29U] = u28; - uint64_t u29 = load64_le(b + 240U); - ws[30U] = u29; - uint64_t u30 = load64_le(b + 248U); - ws[31U] = u30; - for (uint32_t i = 0U; i < 25U; i++) - { - state[i] = state[i] ^ ws[i]; - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = - state[i - + 0U] - ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - uint64_t uu____0 = _C[(i2 + 1U) % 5U]; - uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); - uint64_t x = state[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = state[_Y]; - uint64_t uu____1 = current; - state[_Y] = uu____1 << r | uu____1 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); - uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); - uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); - uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); - uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); - state[0U + 5U * i] = v0; - state[1U + 5U * i] = v1; - state[2U + 5U * i] = v2; - state[3U + 5U * i] = v3; - state[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - state[0U] = state[0U] ^ c; - } + memcpy(bl0, b0 + i * 168U, 168U * sizeof (uint8_t)); + Hacl_Hash_SHA3_absorb_inner_32(168U, b_, state); } } @@ -3876,8 +2205,8 @@ Absorb a final partial block of input and write the output state void Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t inputByteLen) { - uint8_t b2[256U] = { 0U }; - uint8_t *b_ = b2; + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; uint32_t rem = inputByteLen % 168U; uint8_t *b00 = input; uint8_t *bl0 = b_; @@ -3886,191 +2215,79 @@ 
Hacl_Hash_SHA3_shake128_absorb_final(uint64_t *state, uint8_t *input, uint32_t i b01[inputByteLen % 168U] = 0x1FU; uint64_t ws[32U] = { 0U }; uint8_t *b = b_; - uint64_t u0 = load64_le(b); - ws[0U] = u0; - uint64_t u1 = load64_le(b + 8U); - ws[1U] = u1; - uint64_t u2 = load64_le(b + 16U); - ws[2U] = u2; - uint64_t u3 = load64_le(b + 24U); - ws[3U] = u3; - uint64_t u4 = load64_le(b + 32U); - ws[4U] = u4; - uint64_t u5 = load64_le(b + 40U); - ws[5U] = u5; - uint64_t u6 = load64_le(b + 48U); - ws[6U] = u6; - uint64_t u7 = load64_le(b + 56U); - ws[7U] = u7; - uint64_t u8 = load64_le(b + 64U); - ws[8U] = u8; - uint64_t u9 = load64_le(b + 72U); - ws[9U] = u9; - uint64_t u10 = load64_le(b + 80U); - ws[10U] = u10; - uint64_t u11 = load64_le(b + 88U); - ws[11U] = u11; - uint64_t u12 = load64_le(b + 96U); - ws[12U] = u12; - uint64_t u13 = load64_le(b + 104U); - ws[13U] = u13; - uint64_t u14 = load64_le(b + 112U); - ws[14U] = u14; - uint64_t u15 = load64_le(b + 120U); - ws[15U] = u15; - uint64_t u16 = load64_le(b + 128U); - ws[16U] = u16; - uint64_t u17 = load64_le(b + 136U); - ws[17U] = u17; - uint64_t u18 = load64_le(b + 144U); - ws[18U] = u18; - uint64_t u19 = load64_le(b + 152U); - ws[19U] = u19; - uint64_t u20 = load64_le(b + 160U); - ws[20U] = u20; - uint64_t u21 = load64_le(b + 168U); - ws[21U] = u21; - uint64_t u22 = load64_le(b + 176U); - ws[22U] = u22; - uint64_t u23 = load64_le(b + 184U); - ws[23U] = u23; - uint64_t u24 = load64_le(b + 192U); - ws[24U] = u24; - uint64_t u25 = load64_le(b + 200U); - ws[25U] = u25; - uint64_t u26 = load64_le(b + 208U); - ws[26U] = u26; - uint64_t u27 = load64_le(b + 216U); - ws[27U] = u27; - uint64_t u28 = load64_le(b + 224U); - ws[28U] = u28; - uint64_t u29 = load64_le(b + 232U); - ws[29U] = u29; - uint64_t u30 = load64_le(b + 240U); - ws[30U] = u30; - uint64_t u31 = load64_le(b + 248U); - ws[31U] = u31; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 
248U); + ws[31U] = u30; for (uint32_t i = 0U; i < 25U; i++) { state[i] = state[i] ^ ws[i]; } - uint8_t b3[256U] = { 0U }; - uint8_t *b4 = b3; - uint8_t *b0 = b4; + uint8_t b2[256U] = { 0U }; + uint8_t *b3 = b2; + uint8_t *b0 = b3; b0[167U] = 0x80U; - uint64_t ws0[32U] = { 0U }; - uint8_t *b1 = b4; - uint64_t u = load64_le(b1); - ws0[0U] = u; - uint64_t u32 = load64_le(b1 + 8U); - ws0[1U] = u32; - uint64_t u33 = load64_le(b1 + 16U); - ws0[2U] = u33; - uint64_t u34 = load64_le(b1 + 24U); - ws0[3U] = u34; - uint64_t u35 = load64_le(b1 + 32U); - ws0[4U] = u35; - uint64_t u36 = load64_le(b1 + 40U); - ws0[5U] = u36; - uint64_t u37 = load64_le(b1 + 48U); - ws0[6U] = u37; - uint64_t u38 = load64_le(b1 + 56U); - ws0[7U] = u38; - uint64_t u39 = load64_le(b1 + 64U); - ws0[8U] = u39; - uint64_t u40 = load64_le(b1 + 72U); - ws0[9U] = u40; - uint64_t u41 = load64_le(b1 + 80U); - ws0[10U] = u41; - uint64_t u42 = load64_le(b1 + 88U); - ws0[11U] = u42; - uint64_t u43 = load64_le(b1 + 96U); - ws0[12U] = u43; - uint64_t u44 = load64_le(b1 + 104U); - ws0[13U] = u44; - uint64_t u45 = load64_le(b1 + 112U); - ws0[14U] = u45; - uint64_t u46 = load64_le(b1 + 120U); - ws0[15U] = u46; - uint64_t u47 = load64_le(b1 + 128U); - ws0[16U] = u47; - uint64_t u48 = load64_le(b1 + 136U); - ws0[17U] = u48; - uint64_t u49 = load64_le(b1 + 144U); - ws0[18U] = u49; - uint64_t u50 = load64_le(b1 + 152U); - ws0[19U] = u50; - uint64_t u51 = load64_le(b1 + 160U); - ws0[20U] = u51; - uint64_t u52 = load64_le(b1 + 168U); - ws0[21U] = u52; - uint64_t u53 = load64_le(b1 + 176U); - ws0[22U] = u53; - uint64_t u54 = load64_le(b1 + 184U); - ws0[23U] = u54; - uint64_t u55 = load64_le(b1 + 192U); - ws0[24U] = u55; - uint64_t u56 = load64_le(b1 + 200U); - ws0[25U] = u56; - uint64_t u57 = load64_le(b1 + 208U); - ws0[26U] = u57; - uint64_t u58 = load64_le(b1 + 216U); - ws0[27U] = u58; - uint64_t u59 = load64_le(b1 + 224U); - ws0[28U] = u59; - uint64_t u60 = load64_le(b1 + 232U); - ws0[29U] = u60; - uint64_t u61 = load64_le(b1 + 240U); - ws0[30U] = u61; - uint64_t u62 = load64_le(b1 + 248U); - ws0[31U] = u62; - for (uint32_t i = 0U; i < 25U; i++) - { - state[i] = state[i] ^ ws0[i]; - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - uint64_t _C[5U] = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - _C[i] = state[i + 0U] ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - uint64_t uu____0 = _C[(i1 + 1U) % 5U]; - uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); - KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i1 + 5U * i] = state[i1 + 5U * i] ^ _D;);); - uint64_t x = state[1U]; - uint64_t current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - uint64_t temp = state[_Y]; - uint64_t uu____1 = current; - state[_Y] = uu____1 << r | uu____1 >> (64U - r); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); - uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); - uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); - uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); - uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); - state[0U + 5U * i] = v0; - state[1U + 5U * i] = v1; - state[2U + 5U * i] = v2; - state[3U + 5U * i] = v3; - state[4U + 5U * i] = v4;); - uint64_t c = 
Hacl_Hash_SHA3_keccak_rndc[i0]; - state[0U] = state[0U] ^ c; - } + Hacl_Hash_SHA3_absorb_inner_32(168U, b3, state); } /** diff --git a/src/msvc/Hacl_Hash_SHA3_Simd256.c b/src/msvc/Hacl_Hash_SHA3_Simd256.c index b14b01eb..131c34e6 100644 --- a/src/msvc/Hacl_Hash_SHA3_Simd256.c +++ b/src/msvc/Hacl_Hash_SHA3_Simd256.c @@ -27,6 +27,376 @@ #include "internal/Hacl_Hash_SHA3.h" +void +Hacl_Hash_SHA3_Simd256_absorb_inner_256( + uint32_t rateInBytes, + Hacl_Hash_SHA2_uint8_4p b, + Lib_IntVector_Intrinsics_vec256 *s +) +{ + KRML_MAYBE_UNUSED_VAR(rateInBytes); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b2 = b.snd.snd.fst; + uint8_t *b1 = b.snd.fst; + uint8_t *b0 = b.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + 
Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, 
v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } +} + void Hacl_Hash_SHA3_Simd256_shake128( uint8_t *output0, @@ -41,5341 +411,39 @@ Hacl_Hash_SHA3_Simd256_shake128( uint32_t inputByteLen ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; uint32_t rateInBytes1 = 168U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) { uint8_t b00[256U] = { 0U }; uint8_t b10[256U] = { 0U }; 
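    /* Per iteration, one Keccak rate (168 bytes for SHAKE128) is absorbed from
       each of the four independent input streams. The 256-byte scratch blocks
       are deliberately larger than the rate: the lane loads read 32-byte
       vectors at offsets up to 224, so the zero-filled tail keeps every
       vec256_load64_le in bounds, and absorbing those zeros is harmless
       (XOR-identity on state words 21..24; ws[25..31] are never used). After
       the interleave transpose, lane j of each 256-bit state word holds stream
       j, so a single vectorized Keccak-f[1600] permutation advances all four
       SHAKE128 states at once (cf. Hacl_Hash_SHA3_Simd256_absorb_inner_256
       above). */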
uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; - uint8_t *b31 = ib.snd.snd.snd; - uint8_t *b21 = ib.snd.snd.fst; - uint8_t *b11 = ib.snd.fst; - uint8_t *b01 = ib.fst; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; uint8_t *bl3 = b_.snd.snd.snd; uint8_t *bl2 = b_.snd.snd.fst; uint8_t *bl1 = b_.snd.fst; uint8_t *bl0 = b_.fst; - memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b_.snd.snd.snd; - uint8_t *b2 = b_.snd.snd.fst; - uint8_t *b1 = b_.snd.fst; - uint8_t *b0 = b_.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); - ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - 
v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); - 
Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; - 
Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - ws[16U] = ws16; - ws[17U] = ws17; - ws[18U] = ws18; - ws[19U] = ws19; - ws[20U] = ws20; - ws[21U] = ws21; - ws[22U] = ws22; - ws[23U] = ws23; - ws[24U] = ws24; - ws[25U] = ws25; - ws[26U] = ws26; - ws[27U] = ws27; - ws[28U] = ws28; - ws[29U] = ws29; - ws[30U] = ws30; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____0, - Lib_IntVector_Intrinsics_vec256_xor(uu____1, - Lib_IntVector_Intrinsics_vec256_xor(uu____2, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 
5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____3, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____5 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v07 = - Lib_IntVector_Intrinsics_vec256_xor(uu____6, - Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v17 = - Lib_IntVector_Intrinsics_vec256_xor(uu____8, - Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v27 = - Lib_IntVector_Intrinsics_vec256_xor(uu____10, - Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v37 = - Lib_IntVector_Intrinsics_vec256_xor(uu____12, - Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____14, - Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); - s[0U + 5U * i] = v07; - s[1U + 5U * i] = v17; - s[2U + 5U * i] = v27; - s[3U + 5U * i] = v37; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____16, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } - } - uint8_t b00[256U] = { 0U }; - uint8_t b10[256U] = { 0U }; - uint8_t b20[256U] = { 0U }; - uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; - uint32_t rem = inputByteLen % rateInBytes1; - uint8_t *b31 = ib.snd.snd.snd; - uint8_t *b21 = ib.snd.snd.fst; - uint8_t *b11 = ib.snd.fst; - uint8_t *b01 = ib.fst; - uint8_t *bl3 = b_.snd.snd.snd; - uint8_t *bl2 = b_.snd.snd.fst; - uint8_t *bl1 = b_.snd.fst; - uint8_t *bl0 = b_.fst; - memcpy(bl0, b01 + 
inputByteLen - rem, rem * sizeof (uint8_t)); - memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); - memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); - memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); - uint8_t *b32 = b_.snd.snd.snd; - uint8_t *b22 = b_.snd.snd.fst; - uint8_t *b12 = b_.snd.fst; - uint8_t *b02 = b_.fst; - b02[inputByteLen % rateInBytes1] = 0x1FU; - b12[inputByteLen % rateInBytes1] = 0x1FU; - b22[inputByteLen % rateInBytes1] = 0x1FU; - b32[inputByteLen % rateInBytes1] = 0x1FU; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b33 = b_.snd.snd.snd; - uint8_t *b23 = b_.snd.snd.fst; - uint8_t *b13 = b_.snd.fst; - uint8_t *b03 = b_.fst; - ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); - ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); - ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); - ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); - ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); - ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); - ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); - ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); - ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); - ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); - ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); - ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); - ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); - ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); - ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); - ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); - ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); - ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); - ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); - ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); - ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); - ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); - ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); - ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); - ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); - ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); - ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); - ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); - ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); - ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); - ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); - ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - 
v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws00 = v0__; - Lib_IntVector_Intrinsics_vec256 ws110 = v2__; - Lib_IntVector_Intrinsics_vec256 ws210 = v1__; - Lib_IntVector_Intrinsics_vec256 ws33 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v2_2 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_5 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; - Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; - Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; - Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; - Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); - Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); - Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); - Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); - Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; - ws32[0U] = ws00; - ws32[1U] = ws110; - ws32[2U] = ws210; - ws32[3U] = ws33; - ws32[4U] = ws40; - ws32[5U] = ws50; - ws32[6U] = ws60; - ws32[7U] = ws70; - ws32[8U] = ws80; - ws32[9U] = ws90; - ws32[10U] = ws100; - ws32[11U] = ws111; - ws32[12U] = ws120; - ws32[13U] = ws130; - ws32[14U] = ws140; - ws32[15U] = ws150; - ws32[16U] = ws160; - ws32[17U] = ws170; - ws32[18U] = ws180; - ws32[19U] = ws190; - ws32[20U] = ws200; - ws32[21U] = ws211; - ws32[22U] = ws220; - ws32[23U] = ws230; - ws32[24U] = ws240; - ws32[25U] = ws250; - ws32[26U] = ws260; - ws32[27U] = ws270; - ws32[28U] = ws280; - ws32[29U] = ws290; - ws32[30U] = ws300; - ws32[31U] = ws310; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); - } - uint8_t b04[256U] = { 0U }; - uint8_t b14[256U] = { 0U }; - uint8_t b24[256U] = { 0U }; - uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; - uint8_t *b35 = b.snd.snd.snd; - uint8_t *b25 = b.snd.snd.fst; - uint8_t *b15 = b.snd.fst; - uint8_t *b05 = b.fst; - b05[rateInBytes1 - 1U] = 0x80U; - b15[rateInBytes1 - 1U] = 0x80U; - b25[rateInBytes1 - 1U] = 0x80U; - b35[rateInBytes1 - 1U] = 0x80U; - KRML_PRE_ALIGN(32) 
Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b.snd.snd.snd; - uint8_t *b26 = b.snd.snd.fst; - uint8_t *b16 = b.snd.fst; - uint8_t *b06 = b.fst; - ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); - ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); - ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); - ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); - ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); - ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); - ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); - ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); - ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); - ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); - ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); - ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); - ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); - ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); - ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); - ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); - ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); - ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); - ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); - ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); - ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); - ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); - ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); - ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); - ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; - Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; - Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; - Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); - Lib_IntVector_Intrinsics_vec256 - v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); - Lib_IntVector_Intrinsics_vec256 - v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); - Lib_IntVector_Intrinsics_vec256 - v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); - Lib_IntVector_Intrinsics_vec256 - v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); - Lib_IntVector_Intrinsics_vec256 - v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); - Lib_IntVector_Intrinsics_vec256 - v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 - v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; - Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; - Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; - 
Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; - Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; - Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; - Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; - Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); - Lib_IntVector_Intrinsics_vec256 - v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); - Lib_IntVector_Intrinsics_vec256 - v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); - Lib_IntVector_Intrinsics_vec256 - v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); - Lib_IntVector_Intrinsics_vec256 - v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); - Lib_IntVector_Intrinsics_vec256 - v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); - Lib_IntVector_Intrinsics_vec256 - v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 - v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; - Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; - Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; - Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; - Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; - Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; - Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; - Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); - Lib_IntVector_Intrinsics_vec256 - v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); - Lib_IntVector_Intrinsics_vec256 - v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); - Lib_IntVector_Intrinsics_vec256 - v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); - Lib_IntVector_Intrinsics_vec256 - v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); - Lib_IntVector_Intrinsics_vec256 - v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); - Lib_IntVector_Intrinsics_vec256 - v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 - v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; - Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; - Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; - Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; - Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; - Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; - Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; - Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); - Lib_IntVector_Intrinsics_vec256 - v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); - Lib_IntVector_Intrinsics_vec256 - v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); - Lib_IntVector_Intrinsics_vec256 - v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); - Lib_IntVector_Intrinsics_vec256 - v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); - Lib_IntVector_Intrinsics_vec256 - v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); - Lib_IntVector_Intrinsics_vec256 - v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 - v3__10 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; - Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; - Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; - Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; - Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; - Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; - Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; - Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); - Lib_IntVector_Intrinsics_vec256 - v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); - Lib_IntVector_Intrinsics_vec256 - v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); - Lib_IntVector_Intrinsics_vec256 - v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 - v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; - Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; - Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; - Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; - Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; - Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; - Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; - Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); - Lib_IntVector_Intrinsics_vec256 - v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); - Lib_IntVector_Intrinsics_vec256 - v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); - Lib_IntVector_Intrinsics_vec256 - v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); - Lib_IntVector_Intrinsics_vec256 - v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); - Lib_IntVector_Intrinsics_vec256 - v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); - Lib_IntVector_Intrinsics_vec256 - v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 - v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; - Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; - Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; - Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; - Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; - Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; - Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; - Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); - Lib_IntVector_Intrinsics_vec256 - v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); - Lib_IntVector_Intrinsics_vec256 - v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); - Lib_IntVector_Intrinsics_vec256 - v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); - Lib_IntVector_Intrinsics_vec256 - v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, 
v2_13); - Lib_IntVector_Intrinsics_vec256 - v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); - Lib_IntVector_Intrinsics_vec256 - v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 - v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; - Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; - Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; - Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; - Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; - Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; - Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; - Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); - Lib_IntVector_Intrinsics_vec256 - v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); - Lib_IntVector_Intrinsics_vec256 - v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); - Lib_IntVector_Intrinsics_vec256 - v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); - Lib_IntVector_Intrinsics_vec256 - v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); - Lib_IntVector_Intrinsics_vec256 - v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); - Lib_IntVector_Intrinsics_vec256 - v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 - v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; - Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; - Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; - Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; - ws34[0U] = ws01; - ws34[1U] = ws112; - ws34[2U] = ws212; - ws34[3U] = ws35; - ws34[4U] = ws41; - ws34[5U] = ws51; - ws34[6U] = ws61; - ws34[7U] = ws71; - ws34[8U] = ws81; - ws34[9U] = ws91; - ws34[10U] = ws101; - ws34[11U] = ws113; - ws34[12U] = ws121; - ws34[13U] = ws131; - ws34[14U] = ws141; - ws34[15U] = ws151; - ws34[16U] = ws161; - ws34[17U] = ws171; - ws34[18U] = ws181; - ws34[19U] = ws191; - ws34[20U] = ws201; - ws34[21U] = ws213; - ws34[22U] = ws221; - ws34[23U] = ws231; - ws34[24U] = ws241; - ws34[25U] = ws251; - ws34[26U] = ws261; - ws34[27U] = ws271; - ws34[28U] = ws281; - ws34[29U] = ws291; - ws34[30U] = ws301; - ws34[31U] = ws311; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____17, - Lib_IntVector_Intrinsics_vec256_xor(uu____18, - Lib_IntVector_Intrinsics_vec256_xor(uu____19, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____20, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, - 1U), - 
Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____22 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v0 = - Lib_IntVector_Intrinsics_vec256_xor(uu____23, - Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v1 = - Lib_IntVector_Intrinsics_vec256_xor(uu____25, - Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v2 = - Lib_IntVector_Intrinsics_vec256_xor(uu____27, - Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v3 = - Lib_IntVector_Intrinsics_vec256_xor(uu____29, - Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____31, - Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____33, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } - for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) - { - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); - 
Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, 
v119); - Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - 
Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) - { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); - } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 
_C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____34, - Lib_IntVector_Intrinsics_vec256_xor(uu____35, - Lib_IntVector_Intrinsics_vec256_xor(uu____36, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____37, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____39 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v023 = - Lib_IntVector_Intrinsics_vec256_xor(uu____40, - Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v123 = - Lib_IntVector_Intrinsics_vec256_xor(uu____42, - Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v223 = - Lib_IntVector_Intrinsics_vec256_xor(uu____44, - Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v323 = - Lib_IntVector_Intrinsics_vec256_xor(uu____46, - Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____48, - Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); - s[0U + 5U * i] = v023; - s[1U + 5U * i] = v123; - s[2U + 5U * i] = v223; - s[3U + 5U * i] = v323; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____50, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } - } - uint32_t remOut = outputByteLen % 
rateInBytes1; - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, 
v2_17); - Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v3_20 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - 
ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) - { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); - } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); - memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); - memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); - memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); -} - -void -Hacl_Hash_SHA3_Simd256_shake256( - uint8_t *output0, - uint8_t *output1, - uint8_t *output2, - uint8_t *output3, - uint32_t outputByteLen, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint32_t inputByteLen -) -{ - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rateInBytes1 = 136U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) - { - uint8_t b00[256U] = { 0U }; - uint8_t b10[256U] = { 0U }; - uint8_t b20[256U] = { 0U }; - uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; - uint8_t *b31 = ib.snd.snd.snd; - uint8_t *b21 = ib.snd.snd.fst; - uint8_t *b11 = ib.snd.fst; - uint8_t *b01 = ib.fst; - uint8_t *bl3 = b_.snd.snd.snd; - uint8_t *bl2 = b_.snd.snd.fst; - uint8_t *bl1 = b_.snd.fst; - uint8_t *bl0 = b_.fst; - memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b_.snd.snd.snd; - uint8_t *b2 = b_.snd.snd.fst; - uint8_t *b1 = b_.snd.fst; - uint8_t *b0 = b_.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 
128U); - ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 
v22 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws19 = 
v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; - 
Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - ws[16U] = ws16; - ws[17U] = ws17; - ws[18U] = ws18; - ws[19U] = ws19; - ws[20U] = ws20; - ws[21U] = ws21; - ws[22U] = ws22; - ws[23U] = ws23; - ws[24U] = ws24; - ws[25U] = ws25; - ws[26U] = ws26; - ws[27U] = ws27; - ws[28U] = ws28; - ws[29U] = ws29; - ws[30U] = ws30; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____0, - Lib_IntVector_Intrinsics_vec256_xor(uu____1, - Lib_IntVector_Intrinsics_vec256_xor(uu____2, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____3, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____5 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v07 = - Lib_IntVector_Intrinsics_vec256_xor(uu____6, - Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v17 = - Lib_IntVector_Intrinsics_vec256_xor(uu____8, - Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v27 = - Lib_IntVector_Intrinsics_vec256_xor(uu____10, - Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____13 = 
Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
-      Lib_IntVector_Intrinsics_vec256
-      v37 =
-        Lib_IntVector_Intrinsics_vec256_xor(uu____12,
-          Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i]));
-      Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i];
-      Lib_IntVector_Intrinsics_vec256
-      uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
-      Lib_IntVector_Intrinsics_vec256
-      v4 =
-        Lib_IntVector_Intrinsics_vec256_xor(uu____14,
-          Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i]));
-      s[0U + 5U * i] = v07;
-      s[1U + 5U * i] = v17;
-      s[2U + 5U * i] = v27;
-      s[3U + 5U * i] = v37;
-      s[4U + 5U * i] = v4;);
-      /* iota */
-      uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
-      s[0U] = Lib_IntVector_Intrinsics_vec256_xor(s[0U], Lib_IntVector_Intrinsics_vec256_load64(c));
-    }
-  }
-  /* Absorb the trailing partial block of each of the four inputs, padded with
-     the domain-separation suffix 0x1F. */
-  uint8_t b00[256U] = { 0U };
-  uint8_t b10[256U] = { 0U };
-  uint8_t b20[256U] = { 0U };
-  uint8_t b30[256U] = { 0U };
-  uint32_t rem = inputByteLen % rateInBytes1;
-  memcpy(b00, ib.fst + inputByteLen - rem, rem * sizeof (uint8_t));
-  memcpy(b10, ib.snd.fst + inputByteLen - rem, rem * sizeof (uint8_t));
-  memcpy(b20, ib.snd.snd.fst + inputByteLen - rem, rem * sizeof (uint8_t));
-  memcpy(b30, ib.snd.snd.snd + inputByteLen - rem, rem * sizeof (uint8_t));
-  b00[rem] = 0x1FU;
-  b10[rem] = 0x1FU;
-  b20[rem] = 0x1FU;
-  b30[rem] = 0x1FU;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U };
-  /* Load the four 256-byte lane buffers, one 32-byte row per input at a time. */
-  for (uint32_t k = 0U; k < 8U; k++)
-  {
-    ws32[4U * k] = Lib_IntVector_Intrinsics_vec256_load64_le(b00 + 32U * k);
-    ws32[4U * k + 1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b10 + 32U * k);
-    ws32[4U * k + 2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b20 + 32U * k);
-    ws32[4U * k + 3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b30 + 32U * k);
-  }
-  /* 4x4 transpose of every group of four vectors, so that each vector holds
-     one 64-bit state word for all four inputs. */
-  for (uint32_t k = 0U; k < 8U; k++)
-  {
-    Lib_IntVector_Intrinsics_vec256 v0 = ws32[4U * k];
-    Lib_IntVector_Intrinsics_vec256 v1 = ws32[4U * k + 1U];
-    Lib_IntVector_Intrinsics_vec256 v2 = ws32[4U * k + 2U];
-    Lib_IntVector_Intrinsics_vec256 v3 = ws32[4U * k + 3U];
-    Lib_IntVector_Intrinsics_vec256 lo01 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256 hi01 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256 lo23 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
-    Lib_IntVector_Intrinsics_vec256 hi23 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
-    ws32[4U * k] = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo01, lo23);
-    ws32[4U * k + 1U] = Lib_IntVector_Intrinsics_vec256_interleave_low128(hi01, hi23);
-    ws32[4U * k + 2U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo01, lo23);
-    ws32[4U * k + 3U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(hi01, hi23);
-  }
-  for (uint32_t i = 0U; i < 25U; i++)
-  {
-    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]);
-  }
-  /* Second padding block: the final bit of the rate. */
-  uint8_t b04[256U] = { 0U };
-  uint8_t b14[256U] = { 0U };
-  uint8_t b24[256U] = { 0U };
-  uint8_t b34[256U] = { 0U };
-  b04[rateInBytes1 - 1U] = 0x80U;
-  b14[rateInBytes1 - 1U] = 0x80U;
-  b24[rateInBytes1 - 1U] = 0x80U;
-  b34[rateInBytes1 - 1U] = 0x80U;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U };
-  for (uint32_t k = 0U; k < 8U; k++)
-  {
-    ws34[4U * k] = Lib_IntVector_Intrinsics_vec256_load64_le(b04 + 32U * k);
-    ws34[4U * k + 1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b14 + 32U * k);
-    ws34[4U * k + 2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b24 + 32U * k);
-    ws34[4U * k + 3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b34 + 32U * k);
-  }
-  for (uint32_t k = 0U; k < 8U; k++)
-  {
-    Lib_IntVector_Intrinsics_vec256 v0 = ws34[4U * k];
-    Lib_IntVector_Intrinsics_vec256 v1 = ws34[4U * k + 1U];
-    Lib_IntVector_Intrinsics_vec256 v2 = ws34[4U * k + 2U];
-    Lib_IntVector_Intrinsics_vec256 v3 = ws34[4U * k + 3U];
-    Lib_IntVector_Intrinsics_vec256 lo01 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256 hi01 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256 lo23 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
-    Lib_IntVector_Intrinsics_vec256 hi23 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
-    ws34[4U * k] = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo01, lo23);
-    ws34[4U * k + 1U] = Lib_IntVector_Intrinsics_vec256_interleave_low128(hi01, hi23);
-    ws34[4U * k + 2U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo01, lo23);
-    ws34[4U * k + 3U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(hi01, hi23);
-  }
-  for (uint32_t i = 0U; i < 25U; i++)
-  {
-    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]);
-  }
-  /* Keccak-f[1600] on the four interleaved states: 24 rounds of
-     theta, rho-pi, chi, iota. */
-  for (uint32_t i0 = 0U; i0 < 24U; i0++)
-  {
-    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
-    KRML_MAYBE_FOR5(i, 0U, 5U, 1U,
-      _C[i] =
-        Lib_IntVector_Intrinsics_vec256_xor(s[i + 0U],
-          Lib_IntVector_Intrinsics_vec256_xor(s[i + 5U],
-            Lib_IntVector_Intrinsics_vec256_xor(s[i + 10U],
-              Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U])))););
-    KRML_MAYBE_FOR5(i1, 0U, 5U, 1U,
-      Lib_IntVector_Intrinsics_vec256 t = _C[(i1 + 1U) % 5U];
-      Lib_IntVector_Intrinsics_vec256
-      _D =
-        Lib_IntVector_Intrinsics_vec256_xor(_C[(i1 + 4U) % 5U],
-          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(t, 1U),
-            Lib_IntVector_Intrinsics_vec256_shift_right64(t, 63U)));
-      KRML_MAYBE_FOR5(i, 0U, 5U, 1U,
-        s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D);););
-    Lib_IntVector_Intrinsics_vec256 current = s[1U];
-    for (uint32_t i = 0U; i < 24U; i++)
-    {
-      uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
-      uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
-      Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
-      s[_Y] =
-        Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(current, r),
-          Lib_IntVector_Intrinsics_vec256_shift_right64(current, 64U - r));
-      current = temp;
-    }
-    KRML_MAYBE_FOR5(i, 0U, 5U, 1U,
-      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 row[5U] KRML_POST_ALIGN(32) = { 0U };
-      for (uint32_t j = 0U; j < 5U; j++)
-      {
-        row[j] =
-          Lib_IntVector_Intrinsics_vec256_xor(s[j + 5U * i],
-            Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(s[(j + 1U) % 5U + 5U * i]),
-              s[(j + 2U) % 5U + 5U * i]));
-      }
-      for (uint32_t j = 0U; j < 5U; j++)
-      {
-        s[j + 5U * i] = row[j];
-      });
-    s[0U] =
-      Lib_IntVector_Intrinsics_vec256_xor(s[0U],
-        Lib_IntVector_Intrinsics_vec256_load64(Hacl_Hash_SHA3_keccak_rndc[i0]));
-  }
-  /* Squeeze: write out full rate-sized blocks, permuting between blocks. */
-  for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++)
-  {
-    uint8_t hbuf[1024U] = { 0U };
-    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
-    memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-    /* Transpose back to per-input layout and store: input e's state bytes
-       end up contiguous at hbuf + e * 256. */
-    for (uint32_t k = 0U; k < 8U; k++)
-    {
-      Lib_IntVector_Intrinsics_vec256 v0 = ws[4U * k];
-      Lib_IntVector_Intrinsics_vec256 v1 = ws[4U * k + 1U];
-      Lib_IntVector_Intrinsics_vec256 v2 = ws[4U * k + 2U];
-      Lib_IntVector_Intrinsics_vec256 v3 = ws[4U * k + 3U];
-      Lib_IntVector_Intrinsics_vec256 lo01 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
-      Lib_IntVector_Intrinsics_vec256 hi01 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
-      Lib_IntVector_Intrinsics_vec256 lo23 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
-      Lib_IntVector_Intrinsics_vec256 hi23 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
-      ws[4U * k] = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo01, lo23);
-      ws[4U * k + 1U] = Lib_IntVector_Intrinsics_vec256_interleave_low128(hi01, hi23);
-      ws[4U * k + 2U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo01, lo23);
-      ws[4U * k + 3U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(hi01, hi23);
-    }
-    for (uint32_t k = 0U; k < 8U; k++)
-    {
-      for (uint32_t e = 0U; e < 4U; e++)
-      {
-        Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + (e * 8U + k) * 32U, ws[4U * k + e]);
-      }
-    }
-    memcpy(rb.fst + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t));
-    memcpy(rb.snd.fst + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t));
-    memcpy(rb.snd.snd.fst + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t));
-    memcpy(rb.snd.snd.snd + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t));
-    for (uint32_t i1 = 0U; i1 < 24U; i1++)
-    {
-      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
-      KRML_MAYBE_FOR5(i, 0U, 5U, 1U,
-        _C[i] =
-          Lib_IntVector_Intrinsics_vec256_xor(s[i + 0U],
-            Lib_IntVector_Intrinsics_vec256_xor(s[i + 5U],
-              Lib_IntVector_Intrinsics_vec256_xor(s[i + 10U],
-                Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U])))););
-      KRML_MAYBE_FOR5(i2, 0U, 5U, 1U,
-        Lib_IntVector_Intrinsics_vec256 t = _C[(i2 + 1U) % 5U];
-        Lib_IntVector_Intrinsics_vec256
-        _D =
-          Lib_IntVector_Intrinsics_vec256_xor(_C[(i2 + 4U) % 5U],
-            Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(t, 1U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t, 63U)));
-        KRML_MAYBE_FOR5(i, 0U, 5U, 1U,
-          s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D);););
-      Lib_IntVector_Intrinsics_vec256 current = s[1U];
-      for (uint32_t i = 0U; i < 24U; i++)
-      {
-        uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
-        uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
-        Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
-        s[_Y] =
-          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(current, r),
-            Lib_IntVector_Intrinsics_vec256_shift_right64(current, 64U - r));
-        current = temp;
-      }
-      KRML_MAYBE_FOR5(i, 0U, 5U, 1U,
-        KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 row[5U] KRML_POST_ALIGN(32) = { 0U };
-        for (uint32_t j = 0U; j < 5U; j++)
-        {
-          row[j] =
-            Lib_IntVector_Intrinsics_vec256_xor(s[j + 5U * i],
-              Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(s[(j + 1U) % 5U + 5U * i]),
-                s[(j + 2U) % 5U + 5U * i]));
-        }
-        for (uint32_t j = 0U; j < 5U; j++)
-        {
-          s[j + 5U * i] = row[j];
-        });
-      s[0U] =
-        Lib_IntVector_Intrinsics_vec256_xor(s[0U],
-          Lib_IntVector_Intrinsics_vec256_load64(Hacl_Hash_SHA3_keccak_rndc[i1]));
-    }
-  }
-  /* Squeeze the final partial block. */
-  uint32_t remOut = outputByteLen % rateInBytes1;
-  uint8_t hbuf[1024U] = { 0U };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  for (uint32_t k = 0U; k < 8U; k++)
-  {
-    Lib_IntVector_Intrinsics_vec256 v0 = ws[4U * k];
-    Lib_IntVector_Intrinsics_vec256 v1 = ws[4U * k + 1U];
-    Lib_IntVector_Intrinsics_vec256 v2 = ws[4U * k + 2U];
-    Lib_IntVector_Intrinsics_vec256 v3 = ws[4U * k + 3U];
-    Lib_IntVector_Intrinsics_vec256 lo01 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256 hi01 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256 lo23 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
-    Lib_IntVector_Intrinsics_vec256 hi23 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
-    ws[4U * k] = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo01, lo23);
-    ws[4U * k + 1U] = Lib_IntVector_Intrinsics_vec256_interleave_low128(hi01, hi23);
-    ws[4U * k + 2U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo01, lo23);
-    ws[4U * k + 3U] = Lib_IntVector_Intrinsics_vec256_interleave_high128(hi01, hi23);
-  }
-  for (uint32_t k = 0U; k < 8U; k++)
-  {
-    for (uint32_t e = 0U; e < 4U; e++)
-    {
-      Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + (e * 8U + k) * 32U, ws[4U * k + e]);
-    }
-  }
-  memcpy(rb.fst + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t));
-  memcpy(rb.snd.fst + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
-  memcpy(rb.snd.snd.fst + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
-  memcpy(rb.snd.snd.snd + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
-}
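The 4-way batched API below hashes four independent, equal-length inputs in a
single pass; after the interleaving above, every 256-bit vector carries one
64-bit state word from each of the four Keccak states. A minimal calling
sketch for the function that follows (not part of the patch itself; the
buffers m0..m3 and d0..d3 are illustrative only, and a vec256/AVX2-capable
build with the Hacl_Hash_SHA3_Simd256.h header added above is assumed):

    #include "Hacl_Hash_SHA3_Simd256.h"

    static void sha3_224_x4_example(void)
    {
      uint8_t m0[64U] = { 0U }, m1[64U] = { 0U }, m2[64U] = { 0U }, m3[64U] = { 0U };
      uint8_t d0[28U], d1[28U], d2[28U], d3[28U];
      /* Outputs first, then inputs, then the shared input length;
         each SHA3-224 digest is 28 bytes. */
      Hacl_Hash_SHA3_Simd256_sha3_224(d0, d1, d2, d3, m0, m1, m2, m3, 64U);
    }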
sizeof (uint8_t)); - memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b_.snd.snd.snd; - uint8_t *b2 = b_.snd.snd.fst; - uint8_t *b1 = b_.snd.fst; - uint8_t *b0 = b_.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); - ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - 
Lib_IntVector_Intrinsics_vec256 ws0 = v0__; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v2__2 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); - 
Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - ws[16U] = ws16; - ws[17U] = ws17; - ws[18U] = ws18; - ws[19U] = ws19; - ws[20U] = ws20; - ws[21U] = ws21; - ws[22U] = ws22; - ws[23U] = ws23; - ws[24U] = ws24; - ws[25U] = ws25; - ws[26U] = ws26; - ws[27U] = ws27; - ws[28U] = ws28; - ws[29U] = ws29; - ws[30U] = ws30; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____0, - Lib_IntVector_Intrinsics_vec256_xor(uu____1, - Lib_IntVector_Intrinsics_vec256_xor(uu____2, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____3, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - 
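The round loop entered above begins with the theta step: `_C[x]` accumulates the parity of state column x, and `_D` is `_C[x-1] ^ rotl1(_C[x+1])`, XORed into all five lanes of column x (the `shift_left64`/`shift_right64`/`or` triple is a 64-bit rotation by 1). A scalar sketch, assuming the usual state layout `s[x + 5*y]`; `keccak_theta` and `rotl64` are illustrative names only:

    #include <stdint.h>

    static inline uint64_t rotl64(uint64_t v, uint32_t n) /* 0 < n < 64 */
    {
      return (v << n) | (v >> (64U - n));
    }

    /* Scalar sketch of Keccak theta, mirroring the _C/_D computation in the
       vectorized round loop (there, each uint64_t is a vec256 carrying the
       same lane of four states at once). */
    static void keccak_theta(uint64_t s[25])
    {
      uint64_t C[5];
      for (uint32_t x = 0U; x < 5U; x++)
        C[x] = s[x] ^ s[x + 5U] ^ s[x + 10U] ^ s[x + 15U] ^ s[x + 20U];
      for (uint32_t x = 0U; x < 5U; x++)
      {
        uint64_t D = C[(x + 4U) % 5U] ^ rotl64(C[(x + 1U) % 5U], 1U);
        for (uint32_t y = 0U; y < 5U; y++)
          s[x + 5U * y] ^= D;
      }
    }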
-      Lib_IntVector_Intrinsics_vec256 current = x;
-      for (uint32_t i = 0U; i < 24U; i++)
-      {
-        uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
-        uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
-        Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
-        Lib_IntVector_Intrinsics_vec256 uu____5 = current;
-        s[_Y] =
-          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5,
-              r),
-            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r));
-        current = temp;
-      }
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v07 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____6,
-            Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v17 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____8,
-            Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v27 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____10,
-            Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v37 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____12,
-            Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v4 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____14,
-            Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i]));
-        s[0U + 5U * i] = v07;
-        s[1U + 5U * i] = v17;
-        s[2U + 5U * i] = v27;
-        s[3U + 5U * i] = v37;
-        s[4U + 5U * i] = v4;);
-      uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
-      Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U];
-      s[0U] =
-        Lib_IntVector_Intrinsics_vec256_xor(uu____16,
-          Lib_IntVector_Intrinsics_vec256_load64(c));
-    }
-  }
-  uint8_t b00[256U] = { 0U };
-  uint8_t b10[256U] = { 0U };
-  uint8_t b20[256U] = { 0U };
-  uint8_t b30[256U] = { 0U };
-  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
-  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
-  uint32_t rem = inputByteLen % rateInBytes1;
-  uint8_t *b31 = ib.snd.snd.snd;
-  uint8_t *b21 = ib.snd.snd.fst;
-  uint8_t *b11 = ib.snd.fst;
-  uint8_t *b01 = ib.fst;
-  uint8_t *bl3 = b_.snd.snd.snd;
-  uint8_t *bl2 = b_.snd.snd.fst;
-  uint8_t *bl1 = b_.snd.fst;
-  uint8_t *bl0 = b_.fst;
-  memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t));
-  memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t));
-  memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t));
-  memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t));
-  uint8_t *b32 = b_.snd.snd.snd;
-  uint8_t *b22 = b_.snd.snd.fst;
-  uint8_t *b12 = b_.snd.fst;
-  uint8_t *b02 = b_.fst;
-  b02[inputByteLen % rateInBytes1] = 0x06U;
-  b12[inputByteLen % rateInBytes1] = 0x06U;
-  b22[inputByteLen % rateInBytes1] = 0x06U;
-  b32[inputByteLen % rateInBytes1] = 0x06U; -
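Once the full blocks are absorbed, the code above copies the `rem = inputByteLen % rateInBytes1` leftover bytes of each input into a zeroed scratch block and writes the SHA-3 domain byte `0x06` at offset `rem`; the closing `0x80` bit is set in a second scratch block a little further down, and both blocks are XORed into the state before the next permutation, which by linearity of XOR is equivalent to the usual single-block pad10*1. A one-buffer sketch, assuming `rem < rateInBytes`; `sha3_pad_block` is a hypothetical helper, not the library's API:

    #include <stdint.h>
    #include <string.h>

    /* SHA-3 pad10*1 on one rate-sized block: 0x06 = the two domain bits
       plus the first pad bit, 0x80 = the last pad bit.  OR-ing covers
       rem == rateInBytes - 1, where both land in one byte (0x86). */
    static void sha3_pad_block(uint8_t *block, const uint8_t *last,
                               uint32_t rem, uint32_t rateInBytes)
    {
      memset(block, 0, rateInBytes);
      memcpy(block, last, rem);
      block[rem] = 0x06U;
      block[rateInBytes - 1U] |= 0x80U;
    }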
KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b33 = b_.snd.snd.snd; - uint8_t *b23 = b_.snd.snd.fst; - uint8_t *b13 = b_.snd.fst; - uint8_t *b03 = b_.fst; - ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); - ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); - ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); - ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); - ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); - ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); - ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); - ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); - ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); - ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); - ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); - ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); - ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); - ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); - ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); - ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); - ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); - ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); - ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); - ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); - ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); - ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); - ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); - ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); - ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); - ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); - ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); - ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); - ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); - ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); - ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); - ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws00 = v0__; - Lib_IntVector_Intrinsics_vec256 ws110 = v2__; - Lib_IntVector_Intrinsics_vec256 ws210 = v1__; - 
Lib_IntVector_Intrinsics_vec256 ws33 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1__5 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; - Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; - Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; - Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; - Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); - Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); - Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); - Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); - Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; - ws32[0U] = ws00; - ws32[1U] = ws110; - ws32[2U] = ws210; - ws32[3U] = ws33; - ws32[4U] = ws40; - ws32[5U] = ws50; - ws32[6U] = ws60; - ws32[7U] = ws70; - ws32[8U] = ws80; - ws32[9U] = ws90; - ws32[10U] = ws100; - ws32[11U] = ws111; - ws32[12U] = ws120; - ws32[13U] = ws130; - ws32[14U] = ws140; - ws32[15U] = ws150; - ws32[16U] = ws160; - ws32[17U] = ws170; - ws32[18U] = ws180; - ws32[19U] = ws190; - ws32[20U] = ws200; - ws32[21U] = ws211; - ws32[22U] = ws220; - ws32[23U] = ws230; - ws32[24U] = ws240; - ws32[25U] = ws250; - ws32[26U] = ws260; - ws32[27U] = ws270; - ws32[28U] = ws280; - ws32[29U] = ws290; - ws32[30U] = ws300; - ws32[31U] = ws310; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); - } - uint8_t b04[256U] = { 0U }; - uint8_t b14[256U] = { 0U }; - uint8_t b24[256U] = { 0U }; - uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; - uint8_t *b35 = b.snd.snd.snd; - uint8_t *b25 = b.snd.snd.fst; - uint8_t *b15 = b.snd.fst; - uint8_t *b05 = b.fst; - b05[rateInBytes1 - 1U] = 0x80U; - b15[rateInBytes1 - 1U] = 0x80U; - b25[rateInBytes1 - 1U] = 0x80U; - b35[rateInBytes1 - 1U] = 0x80U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b.snd.snd.snd; - uint8_t *b26 = b.snd.snd.fst; - uint8_t *b16 = b.snd.fst; - uint8_t *b06 = b.fst; - ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); - ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); - ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); - ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); - ws34[5U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); - ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); - ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); - ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); - ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); - ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); - ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); - ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); - ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); - ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); - ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); - ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); - ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); - ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); - ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); - ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); - ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); - ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); - ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); - ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); - ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; - Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; - Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; - Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); - Lib_IntVector_Intrinsics_vec256 - v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); - Lib_IntVector_Intrinsics_vec256 - v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); - Lib_IntVector_Intrinsics_vec256 - v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); - Lib_IntVector_Intrinsics_vec256 - v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); - Lib_IntVector_Intrinsics_vec256 - v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); - Lib_IntVector_Intrinsics_vec256 - v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 - v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; - Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; - Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; - Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; - Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; - Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; - Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; - Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); - Lib_IntVector_Intrinsics_vec256 - v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); - Lib_IntVector_Intrinsics_vec256 - v2_8 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); - Lib_IntVector_Intrinsics_vec256 - v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); - Lib_IntVector_Intrinsics_vec256 - v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); - Lib_IntVector_Intrinsics_vec256 - v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); - Lib_IntVector_Intrinsics_vec256 - v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 - v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; - Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; - Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; - Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; - Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; - Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; - Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; - Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); - Lib_IntVector_Intrinsics_vec256 - v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); - Lib_IntVector_Intrinsics_vec256 - v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); - Lib_IntVector_Intrinsics_vec256 - v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); - Lib_IntVector_Intrinsics_vec256 - v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); - Lib_IntVector_Intrinsics_vec256 - v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); - Lib_IntVector_Intrinsics_vec256 - v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 - v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; - Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; - Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; - Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; - Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; - Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; - Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; - Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); - Lib_IntVector_Intrinsics_vec256 - v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); - Lib_IntVector_Intrinsics_vec256 - v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); - Lib_IntVector_Intrinsics_vec256 - v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); - Lib_IntVector_Intrinsics_vec256 - v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); - Lib_IntVector_Intrinsics_vec256 - v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); - Lib_IntVector_Intrinsics_vec256 - v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 - v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; - Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; - Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; - Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; - Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; - Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; - Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; - Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; - 
Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); - Lib_IntVector_Intrinsics_vec256 - v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); - Lib_IntVector_Intrinsics_vec256 - v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); - Lib_IntVector_Intrinsics_vec256 - v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 - v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; - Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; - Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; - Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; - Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; - Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; - Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; - Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); - Lib_IntVector_Intrinsics_vec256 - v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); - Lib_IntVector_Intrinsics_vec256 - v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); - Lib_IntVector_Intrinsics_vec256 - v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); - Lib_IntVector_Intrinsics_vec256 - v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); - Lib_IntVector_Intrinsics_vec256 - v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); - Lib_IntVector_Intrinsics_vec256 - v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 - v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; - Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; - Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; - Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; - Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; - Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; - Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; - Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); - Lib_IntVector_Intrinsics_vec256 - v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); - Lib_IntVector_Intrinsics_vec256 - v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); - Lib_IntVector_Intrinsics_vec256 - v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); - Lib_IntVector_Intrinsics_vec256 - v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); - Lib_IntVector_Intrinsics_vec256 - v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); - Lib_IntVector_Intrinsics_vec256 - v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 - v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; - Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; - Lib_IntVector_Intrinsics_vec256 
ws261 = v1__13; - Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; - Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; - Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; - Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; - Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); - Lib_IntVector_Intrinsics_vec256 - v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); - Lib_IntVector_Intrinsics_vec256 - v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); - Lib_IntVector_Intrinsics_vec256 - v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); - Lib_IntVector_Intrinsics_vec256 - v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); - Lib_IntVector_Intrinsics_vec256 - v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); - Lib_IntVector_Intrinsics_vec256 - v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 - v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; - Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; - Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; - Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; - ws34[0U] = ws01; - ws34[1U] = ws112; - ws34[2U] = ws212; - ws34[3U] = ws35; - ws34[4U] = ws41; - ws34[5U] = ws51; - ws34[6U] = ws61; - ws34[7U] = ws71; - ws34[8U] = ws81; - ws34[9U] = ws91; - ws34[10U] = ws101; - ws34[11U] = ws113; - ws34[12U] = ws121; - ws34[13U] = ws131; - ws34[14U] = ws141; - ws34[15U] = ws151; - ws34[16U] = ws161; - ws34[17U] = ws171; - ws34[18U] = ws181; - ws34[19U] = ws191; - ws34[20U] = ws201; - ws34[21U] = ws213; - ws34[22U] = ws221; - ws34[23U] = ws231; - ws34[24U] = ws241; - ws34[25U] = ws251; - ws34[26U] = ws261; - ws34[27U] = ws271; - ws34[28U] = ws281; - ws34[29U] = ws291; - ws34[30U] = ws301; - ws34[31U] = ws311; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____17, - Lib_IntVector_Intrinsics_vec256_xor(uu____18, - Lib_IntVector_Intrinsics_vec256_xor(uu____19, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____20, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____22 = current; - s[_Y] = - 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v0 = - Lib_IntVector_Intrinsics_vec256_xor(uu____23, - Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v1 = - Lib_IntVector_Intrinsics_vec256_xor(uu____25, - Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v2 = - Lib_IntVector_Intrinsics_vec256_xor(uu____27, - Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v3 = - Lib_IntVector_Intrinsics_vec256_xor(uu____29, - Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____31, - Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____33, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } - for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) - { - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - 
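The remainder of each round, spread over the hunks above, is rho+pi (each lane rotated by `keccak_rotc[i]` while being moved along the cycle `keccak_piln[i]`), chi (the row-wise `a ^ (~b & c)` mix built from `lognot`/`and`/`xor`), and iota (XOR of the round constant `keccak_rndc[i1]` into lane 0). A scalar sketch against the same constant tables the code references; `keccak_round_tail` is an illustrative name only:

    #include <stdint.h>

    extern const uint32_t Hacl_Hash_SHA3_keccak_rotc[24U];
    extern const uint32_t Hacl_Hash_SHA3_keccak_piln[24U];
    extern const uint64_t Hacl_Hash_SHA3_keccak_rndc[24U];

    /* Scalar sketch of rho+pi, chi and iota for round i1, mirroring the
       vectorized loop; the rotation counts lie in [1,63], so the shifts
       below are well-defined. */
    static void keccak_round_tail(uint64_t s[25], uint32_t i1)
    {
      uint64_t current = s[1U];
      for (uint32_t i = 0U; i < 24U; i++)     /* rho + pi */
      {
        uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
        uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
        uint64_t temp = s[_Y];
        s[_Y] = (current << r) | (current >> (64U - r));
        current = temp;
      }
      for (uint32_t y = 0U; y < 25U; y += 5U) /* chi, one row at a time */
      {
        uint64_t row[5];
        for (uint32_t x = 0U; x < 5U; x++)
          row[x] = s[y + x];
        for (uint32_t x = 0U; x < 5U; x++)
          s[y + x] = row[x] ^ (~row[(x + 1U) % 5U] & row[(x + 2U) % 5U]);
      }
      s[0U] ^= Hacl_Hash_SHA3_keccak_rndc[i1]; /* iota */
    }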
Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v2__18 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); - 
Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) - { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); - } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____34, - Lib_IntVector_Intrinsics_vec256_xor(uu____35, - Lib_IntVector_Intrinsics_vec256_xor(uu____36, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - 
KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____37, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____39 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v023 = - Lib_IntVector_Intrinsics_vec256_xor(uu____40, - Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v123 = - Lib_IntVector_Intrinsics_vec256_xor(uu____42, - Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v223 = - Lib_IntVector_Intrinsics_vec256_xor(uu____44, - Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v323 = - Lib_IntVector_Intrinsics_vec256_xor(uu____46, - Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____48, - Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); - s[0U + 5U * i] = v023; - s[1U + 5U * i] = v123; - s[2U + 5U * i] = v223; - s[3U + 5U * i] = v323; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____50, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } - } - uint32_t remOut = 28U % rateInBytes1; - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_15 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - 
Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 - v3__20 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) - { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); - } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; 
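/*
 * Note: after the 32 store64_le calls above, hbuf is stream-major with 256
 * bytes per state: bytes [0,256) belong to output 0, [256,512) to output 1,
 * [512,768) to output 2 and [768,1024) to output 3. The memcpy calls that
 * follow therefore pull the remaining remOut bytes of each 28-byte SHA3-224
 * digest from offsets 0, 256, 512 and 768.
 */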
- memcpy(b0 + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); - memcpy(b1 + 28U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); - memcpy(b2 + 28U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); - memcpy(b36 + 28U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); -} - -void -Hacl_Hash_SHA3_Simd256_sha3_256( - uint8_t *output0, - uint8_t *output1, - uint8_t *output2, - uint8_t *output3, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint32_t inputByteLen -) -{ - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rateInBytes1 = 136U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) - { - uint8_t b00[256U] = { 0U }; - uint8_t b10[256U] = { 0U }; - uint8_t b20[256U] = { 0U }; - uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; - uint8_t *b31 = ib.snd.snd.snd; - uint8_t *b21 = ib.snd.snd.fst; - uint8_t *b11 = ib.snd.fst; - uint8_t *b01 = ib.fst; - uint8_t *bl3 = b_.snd.snd.snd; - uint8_t *bl2 = b_.snd.snd.fst; - uint8_t *bl1 = b_.snd.fst; - uint8_t *bl0 = b_.fst; - memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b_.snd.snd.snd; - uint8_t *b2 = b_.snd.snd.fst; - uint8_t *b1 = b_.snd.fst; - uint8_t *b0 = b_.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); - ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws[24U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - 
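/*
 * Note: each absorbed block is first copied into a zeroed 256-byte staging
 * buffer per input (b00..b30 above), so the 32 unconditional 32-byte vector
 * loads filling ws[0..31] stay inside the staging buffers. At the
 * SHAKE256/SHA3-256 rate of 136 bytes (17 lanes), lanes 17..24 are read
 * from the zero padding, so the later s[i] ^= ws[i] is a no-op for them.
 * A scalar sketch of the same absorb step for one state, assuming a
 * uint64_t s[25] state, a little-endian host, and a keccak_f1600() helper
 * standing in for the inlined 24 rounds:
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *   static void
 *   absorb_block(uint64_t s[25], const uint8_t *block, uint32_t rateInBytes)
 *   {
 *     for (uint32_t i = 0U; i < rateInBytes / 8U; i++) {
 *       uint64_t lane;
 *       memcpy(&lane, block + 8U * i, 8U);  // load64_le on a LE host
 *       s[i] ^= lane;
 *     }
 *     // keccak_f1600(s);  // permutation follows, as in the vectorized code
 *   }
 */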
Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); - Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); - Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); - Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); - Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); - Lib_IntVector_Intrinsics_vec256 - v2_4 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); - Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); - Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); - Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); - Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); - Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - ws[16U] = ws16; - ws[17U] = ws17; - ws[18U] = 
ws18; - ws[19U] = ws19; - ws[20U] = ws20; - ws[21U] = ws21; - ws[22U] = ws22; - ws[23U] = ws23; - ws[24U] = ws24; - ws[25U] = ws25; - ws[26U] = ws26; - ws[27U] = ws27; - ws[28U] = ws28; - ws[29U] = ws29; - ws[30U] = ws30; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); - } - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____0, - Lib_IntVector_Intrinsics_vec256_xor(uu____1, - Lib_IntVector_Intrinsics_vec256_xor(uu____2, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____3, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____5 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v07 = - Lib_IntVector_Intrinsics_vec256_xor(uu____6, - Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v17 = - Lib_IntVector_Intrinsics_vec256_xor(uu____8, - Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v27 = - Lib_IntVector_Intrinsics_vec256_xor(uu____10, - Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v37 = - Lib_IntVector_Intrinsics_vec256_xor(uu____12, - Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____14, - 
Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); - s[0U + 5U * i] = v07; - s[1U + 5U * i] = v17; - s[2U + 5U * i] = v27; - s[3U + 5U * i] = v37; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____16, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); } uint8_t b00[256U] = { 0U }; uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -5394,10 +462,10 @@ Hacl_Hash_SHA3_Simd256_sha3_256( uint8_t *b22 = b_.snd.snd.fst; uint8_t *b12 = b_.snd.fst; uint8_t *b02 = b_.fst; - b02[inputByteLen % rateInBytes1] = 0x06U; - b12[inputByteLen % rateInBytes1] = 0x06U; - b22[inputByteLen % rateInBytes1] = 0x06U; - b32[inputByteLen % rateInBytes1] = 0x06U; + b02[inputByteLen % rateInBytes1] = 0x1FU; + b12[inputByteLen % rateInBytes1] = 0x1FU; + b22[inputByteLen % rateInBytes1] = 0x1FU; + b32[inputByteLen % rateInBytes1] = 0x1FU; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; uint8_t *b33 = b_.snd.snd.snd; uint8_t *b23 = b_.snd.snd.fst; @@ -5663,61 +731,367 @@ Hacl_Hash_SHA3_Simd256_sha3_256( { s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); } - uint8_t b04[256U] = { 0U }; - uint8_t b14[256U] = { 0U }; - uint8_t b24[256U] = { 0U }; - uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; - uint8_t *b35 = b.snd.snd.snd; - uint8_t *b25 = b.snd.snd.fst; - uint8_t *b15 = b.snd.fst; - uint8_t *b05 = b.fst; - b05[rateInBytes1 - 1U] = 0x80U; - b15[rateInBytes1 - 1U] = 0x80U; - b25[rateInBytes1 - 1U] = 0x80U; - b35[rateInBytes1 - 1U] = 0x80U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b.snd.snd.snd; - uint8_t *b26 = b.snd.snd.fst; - uint8_t *b16 = b.snd.fst; - uint8_t *b06 = b.fst; - ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); - ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); - ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); - ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); - ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); - ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); - ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); - ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); - ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); - ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); - ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); - ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); - ws34[15U] 
= Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); - ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); - ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); - ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); - ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); - ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); - ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); - ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); - ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); - ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); - ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); - ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); - ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; - Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; - Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; - Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 
v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + 
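/*
 * Note: this loop is the standard sponge squeeze, vectorized four-wide: for
 * each full rate-sized block of requested output the state is transposed
 * and stored to hbuf, rateInBytes1 bytes are copied to each of the four
 * outputs, and one Keccak-f[1600] permutation is applied; the partial tail
 * (outputByteLen % rateInBytes1) is handled after the loop. A scalar
 * sketch, assuming a little-endian host and a keccak_f1600() helper:
 *
 *   static void
 *   squeeze(uint64_t s[25], uint8_t *out, uint32_t outLen, uint32_t rate)
 *   {
 *     for (uint32_t i = 0U; i < outLen / rate; i++) {
 *       memcpy(out + i * rate, (uint8_t *)s, rate);  // serialize lanes
 *       keccak_f1600(s);
 *     }
 *     uint32_t rem = outLen % rate;
 *     memcpy(out + outLen - rem, (uint8_t *)s, rem);
 *   }
 */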
Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + 
Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; Lib_IntVector_Intrinsics_vec256 v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); Lib_IntVector_Intrinsics_vec256 @@ -5734,14 +1108,14 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); Lib_IntVector_Intrinsics_vec256 v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; - Lib_IntVector_Intrinsics_vec256 
ws112 = v2__7; - Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; - Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; - Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; - Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; - Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; - Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; Lib_IntVector_Intrinsics_vec256 v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); Lib_IntVector_Intrinsics_vec256 @@ -5758,14 +1132,14 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); Lib_IntVector_Intrinsics_vec256 v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; - Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; - Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; - Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; - Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; - Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; - Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; - Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; Lib_IntVector_Intrinsics_vec256 v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); Lib_IntVector_Intrinsics_vec256 @@ -5782,14 +1156,14 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); Lib_IntVector_Intrinsics_vec256 v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; - Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; - Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; - Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; - Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; - Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; - Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; - Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; Lib_IntVector_Intrinsics_vec256 v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); Lib_IntVector_Intrinsics_vec256 @@ -5806,14 +1180,14 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); Lib_IntVector_Intrinsics_vec256 v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; - Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; - Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; - Lib_IntVector_Intrinsics_vec256 
ws151 = v3__10; - Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; - Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; - Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; - Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; Lib_IntVector_Intrinsics_vec256 v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); Lib_IntVector_Intrinsics_vec256 @@ -5830,14 +1204,14 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); Lib_IntVector_Intrinsics_vec256 v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; - Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; - Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; - Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; - Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; - Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; - Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; - Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; Lib_IntVector_Intrinsics_vec256 v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); Lib_IntVector_Intrinsics_vec256 @@ -5854,14 +1228,14 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); Lib_IntVector_Intrinsics_vec256 v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; - Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; - Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; - Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; - Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; - Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; - Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; - Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; Lib_IntVector_Intrinsics_vec256 v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); Lib_IntVector_Intrinsics_vec256 @@ -5878,22 +1252,22 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); Lib_IntVector_Intrinsics_vec256 v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; - Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; - Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; - Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; - 
Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; - Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; - Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; - Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; Lib_IntVector_Intrinsics_vec256 - v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); Lib_IntVector_Intrinsics_vec256 v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 @@ -5902,1025 +1276,646 @@ Hacl_Hash_SHA3_Simd256_sha3_256( v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); Lib_IntVector_Intrinsics_vec256 v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; - Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; - Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; - Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; - ws34[0U] = ws01; - ws34[1U] = ws112; - ws34[2U] = ws212; - ws34[3U] = ws35; - ws34[4U] = ws41; - ws34[5U] = ws51; - ws34[6U] = ws61; - ws34[7U] = ws71; - ws34[8U] = ws81; - ws34[9U] = ws91; - ws34[10U] = ws101; - ws34[11U] = ws113; - ws34[12U] = ws121; - ws34[13U] = ws131; - ws34[14U] = ws141; - ws34[15U] = ws151; - ws34[16U] = ws161; - ws34[17U] = ws171; - ws34[18U] = ws181; - ws34[19U] = ws191; - ws34[20U] = ws201; - ws34[21U] = ws213; - ws34[22U] = ws221; - ws34[23U] = ws231; - ws34[24U] = ws241; - ws34[25U] = ws251; - ws34[26U] = ws261; - ws34[27U] = ws271; - ws34[28U] = ws281; - ws34[29U] = ws291; - ws34[30U] = ws301; - ws34[31U] = ws311; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - 
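/*
 * Note: the unrolled round bodies here are the five Keccak-f[1600] steps,
 * applied to four states at once (one per 64-bit element of each vec256):
 * theta (column parities _C, then _D = C[x-1] ^ rotl(C[x+1], 1)), rho+pi
 * (rotations keccak_rotc[i] walked in keccak_piln[i] order), chi
 * (a ^= ~b & c along each row) and iota (XOR of keccak_rndc[round] into
 * lane (0,0)). A scalar reference round, assuming the tables are the
 * extern constants exposed by the internal SHA3 header:
 *
 *   #include <stdint.h>
 *   extern const uint32_t Hacl_Hash_SHA3_keccak_piln[24];
 *   extern const uint32_t Hacl_Hash_SHA3_keccak_rotc[24];
 *
 *   static inline uint64_t rotl64(uint64_t x, uint32_t n)
 *   {
 *     return (x << n) | (x >> (64U - n));  // all rotc entries are 1..63
 *   }
 *
 *   static void keccak_round(uint64_t s[25], uint64_t rndc)
 *   {
 *     uint64_t C[5];
 *     for (int x = 0; x < 5; x++)          // theta
 *       C[x] = s[x] ^ s[x + 5] ^ s[x + 10] ^ s[x + 15] ^ s[x + 20];
 *     for (int x = 0; x < 5; x++) {
 *       uint64_t D = C[(x + 4) % 5] ^ rotl64(C[(x + 1) % 5], 1U);
 *       for (int y = 0; y < 25; y += 5)
 *         s[x + y] ^= D;
 *     }
 *     uint64_t current = s[1];
 *     for (int i = 0; i < 24; i++) {       // rho + pi
 *       uint32_t Y = Hacl_Hash_SHA3_keccak_piln[i];
 *       uint64_t temp = s[Y];
 *       s[Y] = rotl64(current, Hacl_Hash_SHA3_keccak_rotc[i]);
 *       current = temp;
 *     }
 *     for (int y = 0; y < 25; y += 5) {    // chi
 *       uint64_t r[5];
 *       for (int x = 0; x < 5; x++)
 *         r[x] = s[y + x] ^ (~s[y + (x + 1) % 5] & s[y + (x + 2) % 5]);
 *       for (int x = 0; x < 5; x++)
 *         s[y + x] = r[x];
 *     }
 *     s[0] ^= rndc;                        // iota
 *   }
 */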
KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____17, - Lib_IntVector_Intrinsics_vec256_xor(uu____18, - Lib_IntVector_Intrinsics_vec256_xor(uu____19, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____20, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____22 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v0 = - Lib_IntVector_Intrinsics_vec256_xor(uu____23, - Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v1 = - Lib_IntVector_Intrinsics_vec256_xor(uu____25, - Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v2 = - Lib_IntVector_Intrinsics_vec256_xor(uu____27, - Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v3 = - Lib_IntVector_Intrinsics_vec256_xor(uu____29, - Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____31, - Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____33, - Lib_IntVector_Intrinsics_vec256_load64(c)); + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); } - for (uint32_t i0 = 0U; i0 < 
32U / rateInBytes1; i0++) + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_shake256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + Hacl_Hash_SHA2_uint8_4p + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + Hacl_Hash_SHA2_uint8_4p + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 136U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) { - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); - 
Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, 
v2_19); - Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_22 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) - { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); - } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____34, - Lib_IntVector_Intrinsics_vec256_xor(uu____35, - Lib_IntVector_Intrinsics_vec256_xor(uu____36, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____37, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____39 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); - current = temp; - } - 
KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v023 = - Lib_IntVector_Intrinsics_vec256_xor(uu____40, - Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v123 = - Lib_IntVector_Intrinsics_vec256_xor(uu____42, - Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v223 = - Lib_IntVector_Intrinsics_vec256_xor(uu____44, - Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v323 = - Lib_IntVector_Intrinsics_vec256_xor(uu____46, - Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____48, - Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); - s[0U + 5U * i] = v023; - s[1U + 5U * i] = v123; - s[2U + 5U * i] = v223; - s[3U + 5U * i] = v323; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____50, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); } - uint32_t remOut = 32U % rateInBytes1; - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + 
uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x1FU; + b12[inputByteLen % rateInBytes1] = 0x1FU; + b22[inputByteLen % rateInBytes1] = 0x1FU; + b32[inputByteLen % rateInBytes1] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); Lib_IntVector_Intrinsics_vec256 - v1_15 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = 
ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); 
Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + 
Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); Lib_IntVector_Intrinsics_vec256 - v2__21 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = 
ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); - memcpy(b1 + 32U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); - memcpy(b2 + 32U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); - memcpy(b36 + 32U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); -} - -void -Hacl_Hash_SHA3_Simd256_sha3_384( - uint8_t *output0, - uint8_t *output1, - uint8_t *output2, - uint8_t *output3, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint32_t inputByteLen -) -{ - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rateInBytes1 = 104U; - for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++) + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes1; i0++) { - uint8_t b00[256U] = { 0U }; - uint8_t b10[256U] = { 0U }; - uint8_t b20[256U] = { 0U }; - uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; - uint8_t *b31 = ib.snd.snd.snd; - uint8_t *b21 = ib.snd.snd.fst; - uint8_t *b11 = ib.snd.fst; - uint8_t *b01 = ib.fst; - uint8_t *bl3 = b_.snd.snd.snd; - uint8_t *bl2 = b_.snd.snd.fst; - uint8_t *bl1 = b_.snd.fst; - uint8_t *bl0 = b_.fst; - memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); - memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + uint8_t hbuf[1024U] = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b_.snd.snd.snd; - uint8_t *b2 = b_.snd.snd.fst; - uint8_t *b1 = b_.snd.fst; - uint8_t *b0 = b_.fst; - ws[0U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); - ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); 
Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + 
v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; - 
Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); Lib_IntVector_Intrinsics_vec256 - v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); Lib_IntVector_Intrinsics_vec256 - v0__4 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; Lib_IntVector_Intrinsics_vec256 - v0_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - ws[16U] = ws16; - ws[17U] = ws17; - ws[18U] = ws18; - ws[19U] = ws19; - ws[20U] = ws20; - ws[21U] = ws21; - ws[22U] = ws22; - ws[23U] = ws23; - ws[24U] = ws24; - ws[25U] = ws25; - ws[26U] = ws26; - ws[27U] = ws27; - ws[28U] = ws28; - ws[29U] = ws29; - ws[30U] = ws30; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; ws[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) + for (uint32_t i = 0U; i < 32U; i++) { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); for (uint32_t i1 = 0U; i1 < 24U; i1++) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 
_C[5U] KRML_POST_ALIGN(32) = { 0U }; @@ -6975,28 +1970,28 @@ Hacl_Hash_SHA3_Simd256_sha3_384( Lib_IntVector_Intrinsics_vec256 uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v07 = + v015 = Lib_IntVector_Intrinsics_vec256_xor(uu____6, Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; Lib_IntVector_Intrinsics_vec256 uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v17 = + v115 = Lib_IntVector_Intrinsics_vec256_xor(uu____8, Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; Lib_IntVector_Intrinsics_vec256 uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v27 = + v215 = Lib_IntVector_Intrinsics_vec256_xor(uu____10, Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; Lib_IntVector_Intrinsics_vec256 uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v37 = + v315 = Lib_IntVector_Intrinsics_vec256_xor(uu____12, Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; @@ -7006,10 +2001,10 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v4 = Lib_IntVector_Intrinsics_vec256_xor(uu____14, Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); - s[0U + 5U * i] = v07; - s[1U + 5U * i] = v17; - s[2U + 5U * i] = v27; - s[3U + 5U * i] = v37; + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; s[4U + 5U * i] = v4;); uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; @@ -7018,11 +2013,294 @@ Hacl_Hash_SHA3_Simd256_sha3_384( Lib_IntVector_Intrinsics_vec256_load64(c)); } } + uint32_t remOut = outputByteLen % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + 
Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + 
Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + 
Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_224( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + Hacl_Hash_SHA2_uint8_4p + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + Hacl_Hash_SHA2_uint8_4p + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 144U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = 
ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } uint8_t b00[256U] = { 0U }; uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -7310,61 +2588,367 @@ Hacl_Hash_SHA3_Simd256_sha3_384( { s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); } - uint8_t b04[256U] = { 0U }; - uint8_t b14[256U] = { 0U }; - uint8_t b24[256U] = { 0U }; - uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; - uint8_t *b35 = b.snd.snd.snd; - uint8_t *b25 = b.snd.snd.fst; - uint8_t *b15 = b.snd.fst; - uint8_t *b05 = b.fst; - b05[rateInBytes1 - 1U] = 0x80U; - b15[rateInBytes1 - 1U] = 0x80U; - b25[rateInBytes1 - 1U] = 0x80U; - b35[rateInBytes1 - 1U] = 0x80U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b.snd.snd.snd; - uint8_t *b26 = b.snd.snd.fst; - uint8_t *b16 = b.snd.fst; - uint8_t *b06 = b.fst; - ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); - ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); - ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); - ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); - ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); - ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); - ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); - ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); - ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); - ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); - ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); - ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); - ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); - ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); - ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); - ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); - ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); - ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); - ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); - ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); - ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 
192U); - ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); - ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); - ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); - ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; - Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; - Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; - Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 
+ uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 28U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; Lib_IntVector_Intrinsics_vec256 v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); Lib_IntVector_Intrinsics_vec256 @@ -7381,14 +2965,14 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); Lib_IntVector_Intrinsics_vec256 v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; - Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; - Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; - Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; - Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; - Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; - Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; - Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; Lib_IntVector_Intrinsics_vec256 v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); Lib_IntVector_Intrinsics_vec256 @@ -7405,14 +2989,14 @@ 
Hacl_Hash_SHA3_Simd256_sha3_384( v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); Lib_IntVector_Intrinsics_vec256 v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; - Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; - Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; - Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; - Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; - Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; - Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; - Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; Lib_IntVector_Intrinsics_vec256 v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); Lib_IntVector_Intrinsics_vec256 @@ -7429,14 +3013,14 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); Lib_IntVector_Intrinsics_vec256 v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; - Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; - Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; - Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; - Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; - Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; - Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; - Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; Lib_IntVector_Intrinsics_vec256 v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); Lib_IntVector_Intrinsics_vec256 @@ -7453,14 +3037,14 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); Lib_IntVector_Intrinsics_vec256 v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; - Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; - Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; - Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; - Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; - Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; - Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; - Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; Lib_IntVector_Intrinsics_vec256 v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); Lib_IntVector_Intrinsics_vec256 @@ -7477,14 +3061,14 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__11 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); Lib_IntVector_Intrinsics_vec256 v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; - Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; - Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; - Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; - Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; - Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; - Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; - Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; Lib_IntVector_Intrinsics_vec256 v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); Lib_IntVector_Intrinsics_vec256 @@ -7501,14 +3085,14 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); Lib_IntVector_Intrinsics_vec256 v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; - Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; - Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; - Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; - Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; - Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; - Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; - Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; Lib_IntVector_Intrinsics_vec256 v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); Lib_IntVector_Intrinsics_vec256 @@ -7525,22 +3109,22 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); Lib_IntVector_Intrinsics_vec256 v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; - Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; - Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; - Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; - Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; - Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; - Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; - Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; Lib_IntVector_Intrinsics_vec256 - v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v1_14 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); Lib_IntVector_Intrinsics_vec256 v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 @@ -7549,1025 +3133,645 @@ Hacl_Hash_SHA3_Simd256_sha3_384( v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); Lib_IntVector_Intrinsics_vec256 v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; - Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; - Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; - Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; - ws34[0U] = ws01; - ws34[1U] = ws112; - ws34[2U] = ws212; - ws34[3U] = ws35; - ws34[4U] = ws41; - ws34[5U] = ws51; - ws34[6U] = ws61; - ws34[7U] = ws71; - ws34[8U] = ws81; - ws34[9U] = ws91; - ws34[10U] = ws101; - ws34[11U] = ws113; - ws34[12U] = ws121; - ws34[13U] = ws131; - ws34[14U] = ws141; - ws34[15U] = ws151; - ws34[16U] = ws161; - ws34[17U] = ws171; - ws34[18U] = ws181; - ws34[19U] = ws191; - ws34[20U] = ws201; - ws34[21U] = ws213; - ws34[22U] = ws221; - ws34[23U] = ws231; - ws34[24U] = ws241; - ws34[25U] = ws251; - ws34[26U] = ws261; - ws34[27U] = ws271; - ws34[28U] = ws281; - ws34[29U] = ws291; - ws34[30U] = ws301; - ws34[31U] = ws311; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____17, - Lib_IntVector_Intrinsics_vec256_xor(uu____18, - Lib_IntVector_Intrinsics_vec256_xor(uu____19, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____20, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, - 1U), - 
Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____22 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v0 = - Lib_IntVector_Intrinsics_vec256_xor(uu____23, - Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v1 = - Lib_IntVector_Intrinsics_vec256_xor(uu____25, - Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v2 = - Lib_IntVector_Intrinsics_vec256_xor(uu____27, - Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v3 = - Lib_IntVector_Intrinsics_vec256_xor(uu____29, - Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____31, - Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____33, - Lib_IntVector_Intrinsics_vec256_load64(c)); + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); } - for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 28U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 28U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 28U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + Hacl_Hash_SHA2_uint8_4p + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + Hacl_Hash_SHA2_uint8_4p + 
  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  uint32_t rateInBytes1 = 136U;
+  for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++)
   {
-    uint8_t hbuf[1024U] = { 0U };
-    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
-    memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-    Lib_IntVector_Intrinsics_vec256 v016 = ws[0U];
-    Lib_IntVector_Intrinsics_vec256 v116 = ws[1U];
-    Lib_IntVector_Intrinsics_vec256 v216 = ws[2U];
-    Lib_IntVector_Intrinsics_vec256 v316 = ws[3U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116);
-    Lib_IntVector_Intrinsics_vec256
-    v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116);
-    Lib_IntVector_Intrinsics_vec256
-    v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316);
-    Lib_IntVector_Intrinsics_vec256
-    v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316);
-    Lib_IntVector_Intrinsics_vec256
-    v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15);
-    Lib_IntVector_Intrinsics_vec256
-    v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15);
-    Lib_IntVector_Intrinsics_vec256
-    v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15);
-    Lib_IntVector_Intrinsics_vec256
-    v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15);
-    Lib_IntVector_Intrinsics_vec256 ws0 = v0__15;
-    Lib_IntVector_Intrinsics_vec256 ws1 = v2__15;
-    Lib_IntVector_Intrinsics_vec256 ws2 = v1__15;
-    Lib_IntVector_Intrinsics_vec256 ws3 = v3__15;
-    Lib_IntVector_Intrinsics_vec256 v017 = ws[4U];
-    Lib_IntVector_Intrinsics_vec256 v117 = ws[5U];
-    Lib_IntVector_Intrinsics_vec256 v217 = ws[6U];
-    Lib_IntVector_Intrinsics_vec256 v317 = ws[7U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117);
-    Lib_IntVector_Intrinsics_vec256
-    v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117);
-    Lib_IntVector_Intrinsics_vec256
-    v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317);
-    Lib_IntVector_Intrinsics_vec256
-    v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317);
-    Lib_IntVector_Intrinsics_vec256
-    v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16);
-    Lib_IntVector_Intrinsics_vec256
-    v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16);
-    Lib_IntVector_Intrinsics_vec256
-    v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16);
-    Lib_IntVector_Intrinsics_vec256
-    v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16);
-    Lib_IntVector_Intrinsics_vec256 ws4 = v0__16;
-    Lib_IntVector_Intrinsics_vec256 ws5 = v2__16;
-    Lib_IntVector_Intrinsics_vec256 ws6 = v1__16;
-    Lib_IntVector_Intrinsics_vec256 ws7 = v3__16;
-    Lib_IntVector_Intrinsics_vec256 v018 = ws[8U];
-    Lib_IntVector_Intrinsics_vec256 v118 = ws[9U];
-    Lib_IntVector_Intrinsics_vec256 v218 = ws[10U];
-    Lib_IntVector_Intrinsics_vec256 v318 = ws[11U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118);
-    Lib_IntVector_Intrinsics_vec256
-    v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118);
-    Lib_IntVector_Intrinsics_vec256
-    v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318);
-    Lib_IntVector_Intrinsics_vec256
-    v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318);
-    Lib_IntVector_Intrinsics_vec256
-    v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17);
-    Lib_IntVector_Intrinsics_vec256
-    v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17);
-    Lib_IntVector_Intrinsics_vec256
-    v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17);
-    Lib_IntVector_Intrinsics_vec256
-    v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17);
-    Lib_IntVector_Intrinsics_vec256 ws8 = v0__17;
-    Lib_IntVector_Intrinsics_vec256 ws9 = v2__17;
-    Lib_IntVector_Intrinsics_vec256 ws10 = v1__17;
-    Lib_IntVector_Intrinsics_vec256 ws11 = v3__17;
-    Lib_IntVector_Intrinsics_vec256 v019 = ws[12U];
-    Lib_IntVector_Intrinsics_vec256 v119 = ws[13U];
-    Lib_IntVector_Intrinsics_vec256 v219 = ws[14U];
-    Lib_IntVector_Intrinsics_vec256 v319 = ws[15U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119);
-    Lib_IntVector_Intrinsics_vec256
-    v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119);
-    Lib_IntVector_Intrinsics_vec256
-    v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319);
-    Lib_IntVector_Intrinsics_vec256
-    v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319);
-    Lib_IntVector_Intrinsics_vec256
-    v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18);
-    Lib_IntVector_Intrinsics_vec256
-    v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18);
-    Lib_IntVector_Intrinsics_vec256
-    v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18);
-    Lib_IntVector_Intrinsics_vec256
-    v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18);
-    Lib_IntVector_Intrinsics_vec256 ws12 = v0__18;
-    Lib_IntVector_Intrinsics_vec256 ws13 = v2__18;
-    Lib_IntVector_Intrinsics_vec256 ws14 = v1__18;
-    Lib_IntVector_Intrinsics_vec256 ws15 = v3__18;
-    Lib_IntVector_Intrinsics_vec256 v020 = ws[16U];
-    Lib_IntVector_Intrinsics_vec256 v120 = ws[17U];
-    Lib_IntVector_Intrinsics_vec256 v220 = ws[18U];
-    Lib_IntVector_Intrinsics_vec256 v320 = ws[19U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120);
-    Lib_IntVector_Intrinsics_vec256
-    v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120);
-    Lib_IntVector_Intrinsics_vec256
-    v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320);
-    Lib_IntVector_Intrinsics_vec256
-    v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320);
-    Lib_IntVector_Intrinsics_vec256
-    v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19);
-    Lib_IntVector_Intrinsics_vec256
-    v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19);
-    Lib_IntVector_Intrinsics_vec256
-    v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19);
-    Lib_IntVector_Intrinsics_vec256
-    v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19);
-    Lib_IntVector_Intrinsics_vec256 ws16 = v0__19;
-    Lib_IntVector_Intrinsics_vec256 ws17 = v2__19;
-    Lib_IntVector_Intrinsics_vec256 ws18 = v1__19;
-    Lib_IntVector_Intrinsics_vec256 ws19 = v3__19;
-    Lib_IntVector_Intrinsics_vec256 v021 = ws[20U];
-    Lib_IntVector_Intrinsics_vec256 v121 = ws[21U];
-    Lib_IntVector_Intrinsics_vec256 v221 = ws[22U];
-    Lib_IntVector_Intrinsics_vec256 v321 = ws[23U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121);
-    Lib_IntVector_Intrinsics_vec256
-    v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121);
-    Lib_IntVector_Intrinsics_vec256
-    v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321);
-    Lib_IntVector_Intrinsics_vec256
-    v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321);
-    Lib_IntVector_Intrinsics_vec256
-    v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20);
-    Lib_IntVector_Intrinsics_vec256
-    v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20);
-    Lib_IntVector_Intrinsics_vec256
-    v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20);
-    Lib_IntVector_Intrinsics_vec256
-    v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20);
-    Lib_IntVector_Intrinsics_vec256 ws20 = v0__20;
-    Lib_IntVector_Intrinsics_vec256 ws21 = v2__20;
-    Lib_IntVector_Intrinsics_vec256 ws22 = v1__20;
-    Lib_IntVector_Intrinsics_vec256 ws23 = v3__20;
-    Lib_IntVector_Intrinsics_vec256 v022 = ws[24U];
-    Lib_IntVector_Intrinsics_vec256 v122 = ws[25U];
-    Lib_IntVector_Intrinsics_vec256 v222 = ws[26U];
-    Lib_IntVector_Intrinsics_vec256 v322 = ws[27U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122);
-    Lib_IntVector_Intrinsics_vec256
-    v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122);
-    Lib_IntVector_Intrinsics_vec256
-    v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322);
-    Lib_IntVector_Intrinsics_vec256
-    v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322);
-    Lib_IntVector_Intrinsics_vec256
-    v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21);
-    Lib_IntVector_Intrinsics_vec256
-    v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21);
-    Lib_IntVector_Intrinsics_vec256
-    v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21);
-    Lib_IntVector_Intrinsics_vec256
-    v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21);
-    Lib_IntVector_Intrinsics_vec256 ws24 = v0__21;
-    Lib_IntVector_Intrinsics_vec256 ws25 = v2__21;
-    Lib_IntVector_Intrinsics_vec256 ws26 = v1__21;
-    Lib_IntVector_Intrinsics_vec256 ws27 = v3__21;
-    Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
-    Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
-    Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
-    Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
-    Lib_IntVector_Intrinsics_vec256
-    v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256
-    v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
-    Lib_IntVector_Intrinsics_vec256
-    v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
-    Lib_IntVector_Intrinsics_vec256
-    v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
-    Lib_IntVector_Intrinsics_vec256
-    v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22);
-    Lib_IntVector_Intrinsics_vec256
-    v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22);
-    Lib_IntVector_Intrinsics_vec256
-    v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22);
-    Lib_IntVector_Intrinsics_vec256
-    v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
-    Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
-    Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
-    Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
-    Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
-    ws[0U] = ws0;
-    ws[1U] = ws4;
-    ws[2U] = ws8;
-    ws[3U] = ws12;
-    ws[4U] = ws16;
-    ws[5U] = ws20;
-    ws[6U] = ws24;
-    ws[7U] = ws28;
-    ws[8U] = ws1;
-    ws[9U] = ws5;
-    ws[10U] = ws9;
-    ws[11U] = ws13;
-    ws[12U] = ws17;
-    ws[13U] = ws21;
-    ws[14U] = ws25;
-    ws[15U] = ws29;
-    ws[16U] = ws2;
-    ws[17U] = ws6;
-    ws[18U] = ws10;
-    ws[19U] = ws14;
-    ws[20U] = ws18;
-    ws[21U] = ws22;
-    ws[22U] = ws26;
-    ws[23U] = ws30;
-    ws[24U] = ws3;
-    ws[25U] = ws7;
-    ws[26U] = ws11;
-    ws[27U] = ws15;
-    ws[28U] = ws19;
-    ws[29U] = ws23;
-    ws[30U] = ws27;
-    ws[31U] = ws31;
-    for (uint32_t i = 0U; i < 32U; i++)
-    {
-      Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
-    }
-    uint8_t *b36 = rb.snd.snd.snd;
-    uint8_t *b2 = rb.snd.snd.fst;
-    uint8_t *b1 = rb.snd.fst;
-    uint8_t *b0 = rb.fst;
-    memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t));
-    memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t));
-    memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t));
-    memcpy(b36 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t));
-    for (uint32_t i1 = 0U; i1 < 24U; i1++)
-    {
-      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U];
-        Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U];
-        Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U];
-        _C[i] =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____34,
-            Lib_IntVector_Intrinsics_vec256_xor(uu____35,
-              Lib_IntVector_Intrinsics_vec256_xor(uu____36,
-                Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U])))););
-      KRML_MAYBE_FOR5(i2,
-        0U,
-        5U,
-        1U,
-        Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U];
-        Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U];
-        Lib_IntVector_Intrinsics_vec256
-        _D =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____37,
-            Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38,
-                1U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U)));
-        KRML_MAYBE_FOR5(i,
-          0U,
-          5U,
-          1U,
-          s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D);););
-      Lib_IntVector_Intrinsics_vec256 x = s[1U];
-      Lib_IntVector_Intrinsics_vec256 current = x;
-      for (uint32_t i = 0U; i < 24U; i++)
-      {
-        uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i];
-        uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i];
-        Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
-        Lib_IntVector_Intrinsics_vec256 uu____39 = current;
-        s[_Y] =
-          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39,
-              r),
-            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r));
-        current = temp;
-      }
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v023 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____40,
-            Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v123 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____42,
-            Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v223 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____44,
-            Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v323 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____46,
-            Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i]));
-        Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i];
-        Lib_IntVector_Intrinsics_vec256
-        uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
-        Lib_IntVector_Intrinsics_vec256
-        v4 =
-          Lib_IntVector_Intrinsics_vec256_xor(uu____48,
-            Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i]));
-        s[0U + 5U * i] = v023;
-        s[1U + 5U * i] = v123;
-        s[2U + 5U * i] = v223;
-        s[3U + 5U * i] = v323;
-        s[4U + 5U * i] = v4;);
-      uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
-      Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U];
-      s[0U] =
-        Lib_IntVector_Intrinsics_vec256_xor(uu____50,
-          Lib_IntVector_Intrinsics_vec256_load64(c));
-    }
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    Hacl_Hash_SHA2_uint8_4p
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b3 = ib.snd.snd.snd;
+    uint8_t *b2 = ib.snd.snd.fst;
+    uint8_t *b1 = ib.snd.fst;
+    uint8_t *b0 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s);
   }
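Note on the hunk above: the rewritten absorb loop stages one rate-sized block (136 bytes here) from each of the four inputs into zero-initialized 256-byte scratch buffers and hands the quadruple to the shared Hacl_Hash_SHA3_Simd256_absorb_inner_256 helper, instead of carrying the transposed loads and the 24 Keccak rounds inline as the removed lines did. A minimal scalar sketch of the same schedule, with absorb_block and input[] as hypothetical stand-ins for illustration only:

    /* Sketch only: per-iteration block schedule of the 4-way absorb. */
    for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++)
    {
      uint8_t block[4][256U] = { { 0U } };    /* zero-padded staging buffers */
      for (uint32_t j = 0U; j < 4U; j++)
        memcpy(block[j], input[j] + i * rateInBytes1, rateInBytes1);
      absorb_block(rateInBytes1, block, s);   /* hypothetical stand-in: xor the
                                                 blocks into the state, permute */
    }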
-  uint32_t remOut = 48U % rateInBytes1;
-  uint8_t hbuf[1024U] = { 0U };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 v016 = ws[0U];
-  Lib_IntVector_Intrinsics_vec256 v116 = ws[1U];
-  Lib_IntVector_Intrinsics_vec256 v216 = ws[2U];
-  Lib_IntVector_Intrinsics_vec256 v316 = ws[3U];
+  uint8_t b00[256U] = { 0U };
+  uint8_t b10[256U] = { 0U };
+  uint8_t b20[256U] = { 0U };
+  uint8_t b30[256U] = { 0U };
+  Hacl_Hash_SHA2_uint8_4p
+  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+  uint32_t rem = inputByteLen % rateInBytes1;
+  uint8_t *b31 = ib.snd.snd.snd;
+  uint8_t *b21 = ib.snd.snd.fst;
+  uint8_t *b11 = ib.snd.fst;
+  uint8_t *b01 = ib.fst;
+  uint8_t *bl3 = b_.snd.snd.snd;
+  uint8_t *bl2 = b_.snd.snd.fst;
+  uint8_t *bl1 = b_.snd.fst;
+  uint8_t *bl0 = b_.fst;
+  memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t));
+  memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t));
+  uint8_t *b32 = b_.snd.snd.snd;
+  uint8_t *b22 = b_.snd.snd.fst;
+  uint8_t *b12 = b_.snd.fst;
+  uint8_t *b02 = b_.fst;
+  b02[inputByteLen % rateInBytes1] = 0x06U;
+  b12[inputByteLen % rateInBytes1] = 0x06U;
+  b22[inputByteLen % rateInBytes1] = 0x06U;
+  b32[inputByteLen % rateInBytes1] = 0x06U;
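The four 0x06 writes above place the SHA-3 domain-separation suffix immediately after the residual input bytes of each lane, in an otherwise zeroed rate-sized block; the closing 0x80 pad bit arrives with the second, nearly-empty block built further down, and since both blocks are xored into the state before the next permutation, the degenerate case rem == rateInBytes1 - 1U correctly collapses to the combined byte 0x86. An equivalent single-buffer view of the padding for one lane (illustrative only):

    uint8_t last[136U] = { 0U };                     /* one rate-sized block */
    memcpy(last, input + inputByteLen - rem, rem);   /* residual message bytes */
    last[rem] = 0x06U;                               /* SHA-3 suffix, start of pad10*1 */
    last[136U - 1U] ^= 0x80U;                        /* final pad bit; xor handles rem == 135U */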
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b33 = b_.snd.snd.snd;
+  uint8_t *b23 = b_.snd.snd.fst;
+  uint8_t *b13 = b_.snd.fst;
+  uint8_t *b03 = b_.fst;
+  ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03);
+  ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13);
+  ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23);
+  ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33);
+  ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U);
+  ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U);
+  ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U);
+  ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U);
+  ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U);
+  ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U);
+  ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U);
+  ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U);
+  ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U);
+  ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U);
+  ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U);
+  ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U);
+  ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U);
+  ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U);
+  ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U);
+  ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U);
+  ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U);
+  ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U);
+  ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U);
+  ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U);
+  ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U);
+  ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U);
+  ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U);
+  ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U);
+  ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U);
+  ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U);
+  ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U);
+  ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U);
+  Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U];
+  Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U];
+  Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U];
+  Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U];
   Lib_IntVector_Intrinsics_vec256
-  v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116);
+  v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10);
   Lib_IntVector_Intrinsics_vec256
-  v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116);
+  v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10);
   Lib_IntVector_Intrinsics_vec256
-  v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316);
+  v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30);
   Lib_IntVector_Intrinsics_vec256
-  v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316);
+  v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30);
   Lib_IntVector_Intrinsics_vec256
-  v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15);
+  v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_);
   Lib_IntVector_Intrinsics_vec256
-  v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15);
+  v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_);
   Lib_IntVector_Intrinsics_vec256
-  v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15);
+  v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_);
   Lib_IntVector_Intrinsics_vec256
-  v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15);
-  Lib_IntVector_Intrinsics_vec256 ws0 = v0__15;
-  Lib_IntVector_Intrinsics_vec256 ws1 = v2__15;
-  Lib_IntVector_Intrinsics_vec256 ws2 = v1__15;
-  Lib_IntVector_Intrinsics_vec256 ws3 = v3__15;
-  Lib_IntVector_Intrinsics_vec256 v017 = ws[4U];
-  Lib_IntVector_Intrinsics_vec256 v117 = ws[5U];
-  Lib_IntVector_Intrinsics_vec256 v217 = ws[6U];
-  Lib_IntVector_Intrinsics_vec256 v317 = ws[7U];
+  v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_);
+  Lib_IntVector_Intrinsics_vec256 ws00 = v0__;
+  Lib_IntVector_Intrinsics_vec256 ws110 = v2__;
+  Lib_IntVector_Intrinsics_vec256 ws210 = v1__;
+  Lib_IntVector_Intrinsics_vec256 ws33 = v3__;
+  Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U];
+  Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U];
+  Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U];
+  Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U];
   Lib_IntVector_Intrinsics_vec256
-  v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117);
+  v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11);
   Lib_IntVector_Intrinsics_vec256
-  v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117);
+  v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11);
   Lib_IntVector_Intrinsics_vec256
-  v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317);
+  v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31);
   Lib_IntVector_Intrinsics_vec256
-  v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317);
+  v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31);
   Lib_IntVector_Intrinsics_vec256
-  v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16);
+  v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0);
   Lib_IntVector_Intrinsics_vec256
-  v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16);
+  v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0);
   Lib_IntVector_Intrinsics_vec256
-  v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16);
+  v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0);
   Lib_IntVector_Intrinsics_vec256
-  v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16);
-  Lib_IntVector_Intrinsics_vec256 ws4 = v0__16;
-  Lib_IntVector_Intrinsics_vec256 ws5 = v2__16;
-  Lib_IntVector_Intrinsics_vec256 ws6 = v1__16;
-  Lib_IntVector_Intrinsics_vec256 ws7 = v3__16;
-  Lib_IntVector_Intrinsics_vec256 v018 = ws[8U];
-  Lib_IntVector_Intrinsics_vec256 v118 = ws[9U];
-  Lib_IntVector_Intrinsics_vec256 v218 = ws[10U];
-  Lib_IntVector_Intrinsics_vec256 v318 = ws[11U];
+  v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0);
+  Lib_IntVector_Intrinsics_vec256 ws40 = v0__0;
+  Lib_IntVector_Intrinsics_vec256 ws50 = v2__0;
+  Lib_IntVector_Intrinsics_vec256 ws60 = v1__0;
+  Lib_IntVector_Intrinsics_vec256 ws70 = v3__0;
+  Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U];
+  Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U];
+  Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U];
+  Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U];
   Lib_IntVector_Intrinsics_vec256
-  v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118);
+  v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
   Lib_IntVector_Intrinsics_vec256
-  v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118);
+  v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
   Lib_IntVector_Intrinsics_vec256
-  v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318);
+  v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
   Lib_IntVector_Intrinsics_vec256
-  v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318);
+  v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
   Lib_IntVector_Intrinsics_vec256
-  v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17);
+  v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
   Lib_IntVector_Intrinsics_vec256
-  v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17);
+  v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
   Lib_IntVector_Intrinsics_vec256
-  v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17);
+  v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
   Lib_IntVector_Intrinsics_vec256
-  v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17);
-  Lib_IntVector_Intrinsics_vec256 ws8 = v0__17;
-  Lib_IntVector_Intrinsics_vec256 ws9 = v2__17;
-  Lib_IntVector_Intrinsics_vec256 ws10 = v1__17;
-  Lib_IntVector_Intrinsics_vec256 ws11 = v3__17;
-  Lib_IntVector_Intrinsics_vec256 v019 = ws[12U];
-  Lib_IntVector_Intrinsics_vec256 v119 = ws[13U];
-  Lib_IntVector_Intrinsics_vec256 v219 = ws[14U];
-  Lib_IntVector_Intrinsics_vec256 v319 = ws[15U];
+  v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
+  Lib_IntVector_Intrinsics_vec256 ws80 = v0__1;
+  Lib_IntVector_Intrinsics_vec256 ws90 = v2__1;
+  Lib_IntVector_Intrinsics_vec256 ws100 = v1__1;
+  Lib_IntVector_Intrinsics_vec256 ws111 = v3__1;
+  Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U];
+  Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U];
+  Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U];
+  Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U];
   Lib_IntVector_Intrinsics_vec256
-  v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119);
+  v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
   Lib_IntVector_Intrinsics_vec256
-  v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119);
+  v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
   Lib_IntVector_Intrinsics_vec256
-  v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319);
+  v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
   Lib_IntVector_Intrinsics_vec256
-  v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319);
+  v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
   Lib_IntVector_Intrinsics_vec256
-  v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18);
+  v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
   Lib_IntVector_Intrinsics_vec256
-  v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18);
+  v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
   Lib_IntVector_Intrinsics_vec256
-  v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18);
+  v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
   Lib_IntVector_Intrinsics_vec256
-  v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18);
-  Lib_IntVector_Intrinsics_vec256 ws12 = v0__18;
-  Lib_IntVector_Intrinsics_vec256 ws13 = v2__18;
-  Lib_IntVector_Intrinsics_vec256 ws14 = v1__18;
-  Lib_IntVector_Intrinsics_vec256 ws15 = v3__18;
-  Lib_IntVector_Intrinsics_vec256 v020 = ws[16U];
-  Lib_IntVector_Intrinsics_vec256 v120 = ws[17U];
-  Lib_IntVector_Intrinsics_vec256 v220 = ws[18U];
-  Lib_IntVector_Intrinsics_vec256 v320 = ws[19U];
+  v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
+  Lib_IntVector_Intrinsics_vec256 ws120 = v0__2;
+  Lib_IntVector_Intrinsics_vec256 ws130 = v2__2;
+  Lib_IntVector_Intrinsics_vec256 ws140 = v1__2;
+  Lib_IntVector_Intrinsics_vec256 ws150 = v3__2;
+  Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U];
+  Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U];
+  Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U];
+  Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U];
   Lib_IntVector_Intrinsics_vec256
-  v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120);
+  v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
   Lib_IntVector_Intrinsics_vec256
-  v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120);
+  v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
   Lib_IntVector_Intrinsics_vec256
-  v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320);
+  v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
   Lib_IntVector_Intrinsics_vec256
-  v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320);
+  v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34);
   Lib_IntVector_Intrinsics_vec256
-  v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19);
+  v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3);
   Lib_IntVector_Intrinsics_vec256
-  v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19);
+  v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3);
   Lib_IntVector_Intrinsics_vec256
-  v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19);
+  v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3);
   Lib_IntVector_Intrinsics_vec256
-  v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19);
-  Lib_IntVector_Intrinsics_vec256 ws16 = v0__19;
-  Lib_IntVector_Intrinsics_vec256 ws17 = v2__19;
-  Lib_IntVector_Intrinsics_vec256 ws18 = v1__19;
-  Lib_IntVector_Intrinsics_vec256 ws19 = v3__19;
-  Lib_IntVector_Intrinsics_vec256 v021 = ws[20U];
-  Lib_IntVector_Intrinsics_vec256 v121 = ws[21U];
-  Lib_IntVector_Intrinsics_vec256 v221 = ws[22U];
-  Lib_IntVector_Intrinsics_vec256 v321 = ws[23U];
+  v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3);
+  Lib_IntVector_Intrinsics_vec256 ws160 = v0__3;
+  Lib_IntVector_Intrinsics_vec256 ws170 = v2__3;
+  Lib_IntVector_Intrinsics_vec256 ws180 = v1__3;
+  Lib_IntVector_Intrinsics_vec256 ws190 = v3__3;
+  Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U];
+  Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U];
+  Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U];
+  Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U];
   Lib_IntVector_Intrinsics_vec256
-  v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121);
+  v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15);
   Lib_IntVector_Intrinsics_vec256
-  v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121);
+  v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15);
   Lib_IntVector_Intrinsics_vec256
-  v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321);
+  v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35);
   Lib_IntVector_Intrinsics_vec256
-  v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321);
+  v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35);
   Lib_IntVector_Intrinsics_vec256
-  v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20);
+  v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4);
   Lib_IntVector_Intrinsics_vec256
-  v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20);
+  v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4);
   Lib_IntVector_Intrinsics_vec256
-  v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20);
+  v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4);
   Lib_IntVector_Intrinsics_vec256
-  v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20);
-  Lib_IntVector_Intrinsics_vec256 ws20 = v0__20;
-  Lib_IntVector_Intrinsics_vec256 ws21 = v2__20;
-  Lib_IntVector_Intrinsics_vec256 ws22 = v1__20;
-  Lib_IntVector_Intrinsics_vec256 ws23 = v3__20;
-  Lib_IntVector_Intrinsics_vec256 v022 = ws[24U];
-  Lib_IntVector_Intrinsics_vec256 v122 = ws[25U];
-  Lib_IntVector_Intrinsics_vec256 v222 = ws[26U];
-  Lib_IntVector_Intrinsics_vec256 v322 = ws[27U];
+  v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
+  Lib_IntVector_Intrinsics_vec256 ws200 = v0__4;
+  Lib_IntVector_Intrinsics_vec256 ws211 = v2__4;
+  Lib_IntVector_Intrinsics_vec256 ws220 = v1__4;
+  Lib_IntVector_Intrinsics_vec256 ws230 = v3__4;
+  Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U];
+  Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U];
+  Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U];
+  Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U];
   Lib_IntVector_Intrinsics_vec256
-  v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122);
+  v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
   Lib_IntVector_Intrinsics_vec256
-  v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122);
+  v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
   Lib_IntVector_Intrinsics_vec256
-  v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322);
+  v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
   Lib_IntVector_Intrinsics_vec256
-  v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322);
+  v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
   Lib_IntVector_Intrinsics_vec256
-  v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21);
+  v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
   Lib_IntVector_Intrinsics_vec256
-  v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21);
+  v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
   Lib_IntVector_Intrinsics_vec256
-  v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21);
+  v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
   Lib_IntVector_Intrinsics_vec256
-  v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21);
-  Lib_IntVector_Intrinsics_vec256 ws24 = v0__21;
-  Lib_IntVector_Intrinsics_vec256 ws25 = v2__21;
-  Lib_IntVector_Intrinsics_vec256 ws26 = v1__21;
-  Lib_IntVector_Intrinsics_vec256 ws27 = v3__21;
-  Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
-  Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
-  Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
-  Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
+  v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256 ws240 = v0__5;
+  Lib_IntVector_Intrinsics_vec256 ws250 = v2__5;
+  Lib_IntVector_Intrinsics_vec256 ws260 = v1__5;
+  Lib_IntVector_Intrinsics_vec256 ws270 = v3__5;
+  Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U];
+  Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U];
+  Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U];
+  Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U];
   Lib_IntVector_Intrinsics_vec256
-  v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+  v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17);
   Lib_IntVector_Intrinsics_vec256
-  v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+  v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17);
   Lib_IntVector_Intrinsics_vec256
-  v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+  v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37);
   Lib_IntVector_Intrinsics_vec256
-  v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+  v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37);
   Lib_IntVector_Intrinsics_vec256
-  v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22);
+  v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
   Lib_IntVector_Intrinsics_vec256
-  v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22);
+  v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
   Lib_IntVector_Intrinsics_vec256
-  v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22);
+  v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
   Lib_IntVector_Intrinsics_vec256
-  v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
-  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
-  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
-  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
-  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
-  ws[0U] = ws0;
-  ws[1U] = ws4;
-  ws[2U] = ws8;
-  ws[3U] = ws12;
-  ws[4U] = ws16;
-  ws[5U] = ws20;
-  ws[6U] = ws24;
-  ws[7U] = ws28;
-  ws[8U] = ws1;
-  ws[9U] = ws5;
-  ws[10U] = ws9;
-  ws[11U] = ws13;
-  ws[12U] = ws17;
-  ws[13U] = ws21;
-  ws[14U] = ws25;
-  ws[15U] = ws29;
-  ws[16U] = ws2;
-  ws[17U] = ws6;
-  ws[18U] = ws10;
-  ws[19U] = ws14;
-  ws[20U] = ws18;
-  ws[21U] = ws22;
-  ws[22U] = ws26;
-  ws[23U] = ws30;
-  ws[24U] = ws3;
-  ws[25U] = ws7;
-  ws[26U] = ws11;
-  ws[27U] = ws15;
-  ws[28U] = ws19;
-  ws[29U] = ws23;
-  ws[30U] = ws27;
-  ws[31U] = ws31;
-  for (uint32_t i = 0U; i < 32U; i++)
+  v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256 ws280 = v0__6;
+  Lib_IntVector_Intrinsics_vec256 ws290 = v2__6;
+  Lib_IntVector_Intrinsics_vec256 ws300 = v1__6;
+  Lib_IntVector_Intrinsics_vec256 ws310 = v3__6;
+  ws32[0U] = ws00;
+  ws32[1U] = ws110;
+  ws32[2U] = ws210;
+  ws32[3U] = ws33;
+  ws32[4U] = ws40;
+  ws32[5U] = ws50;
+  ws32[6U] = ws60;
+  ws32[7U] = ws70;
+  ws32[8U] = ws80;
+  ws32[9U] = ws90;
+  ws32[10U] = ws100;
+  ws32[11U] = ws111;
+  ws32[12U] = ws120;
+  ws32[13U] = ws130;
+  ws32[14U] = ws140;
+  ws32[15U] = ws150;
+  ws32[16U] = ws160;
+  ws32[17U] = ws170;
+  ws32[18U] = ws180;
+  ws32[19U] = ws190;
+  ws32[20U] = ws200;
+  ws32[21U] = ws211;
+  ws32[22U] = ws220;
+  ws32[23U] = ws230;
+  ws32[24U] = ws240;
+  ws32[25U] = ws250;
+  ws32[26U] = ws260;
+  ws32[27U] = ws270;
+  ws32[28U] = ws280;
+  ws32[29U] = ws290;
+  ws32[30U] = ws300;
+  ws32[31U] = ws310;
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]);
   }
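For reference: each interleave_low64/interleave_high64 pass followed by interleave_low128/interleave_high128 above is a 4x4 transpose of 64-bit lanes. Every group of four vec256 registers holds 32 consecutive bytes from each of the four buffers; after the transpose, one register holds word k of all four instances, so the single 25-iteration xor loop absorbs the padded block into all four Keccak states at once. A scalar model of what one such group computes:

    /* in[j][k]  = 64-bit word k loaded from buffer j (one vec256 per j);
       out[k][j] = word k of every buffer gathered into one vec256. */
    uint64_t in[4][4];
    uint64_t out[4][4];
    for (int k = 0; k < 4; k++)
      for (int j = 0; j < 4; j++)
        out[k][j] = in[j][k];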
-  uint8_t *b36 = rb.snd.snd.snd;
-  uint8_t *b2 = rb.snd.snd.fst;
-  uint8_t *b1 = rb.snd.fst;
-  uint8_t *b0 = rb.fst;
-  memcpy(b0 + 48U - remOut, hbuf, remOut * sizeof (uint8_t));
-  memcpy(b1 + 48U - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
-  memcpy(b2 + 48U - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
-  memcpy(b36 + 48U - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
-}
-
-void
-Hacl_Hash_SHA3_Simd256_sha3_512(
-  uint8_t *output0,
-  uint8_t *output1,
-  uint8_t *output2,
-  uint8_t *output3,
-  uint8_t *input0,
-  uint8_t *input1,
-  uint8_t *input2,
-  uint8_t *input3,
-  uint32_t inputByteLen
-)
-{
-  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
-  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
-  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
-  uint32_t rateInBytes1 = 72U;
-  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes1; i0++)
+  uint8_t b04[256U] = { 0U };
+  uint8_t b14[256U] = { 0U };
+  uint8_t b24[256U] = { 0U };
+  uint8_t b34[256U] = { 0U };
+  Hacl_Hash_SHA2_uint8_4p
+  b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } };
+  uint8_t *b3 = b.snd.snd.snd;
+  uint8_t *b25 = b.snd.snd.fst;
+  uint8_t *b15 = b.snd.fst;
+  uint8_t *b05 = b.fst;
+  b05[rateInBytes1 - 1U] = 0x80U;
+  b15[rateInBytes1 - 1U] = 0x80U;
+  b25[rateInBytes1 - 1U] = 0x80U;
+  b3[rateInBytes1 - 1U] = 0x80U;
+  Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s);
+  for (uint32_t i0 = 0U; i0 < 32U / rateInBytes1; i0++)
   {
-    uint8_t b00[256U] = { 0U };
-    uint8_t b10[256U] = { 0U };
-    uint8_t b20[256U] = { 0U };
-    uint8_t b30[256U] = { 0U };
-    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
-    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
-    uint8_t *b31 = ib.snd.snd.snd;
-    uint8_t *b21 = ib.snd.snd.fst;
-    uint8_t *b11 = ib.snd.fst;
-    uint8_t *b01 = ib.fst;
-    uint8_t *bl3 = b_.snd.snd.snd;
-    uint8_t *bl2 = b_.snd.snd.fst;
-    uint8_t *bl1 = b_.snd.fst;
-    uint8_t *bl0 = b_.fst;
-    memcpy(bl0, b01 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
-    memcpy(bl1, b11 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
-    memcpy(bl2, b21 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
-    memcpy(bl3, b31 + i0 * rateInBytes1, rateInBytes1 * sizeof (uint8_t));
+    uint8_t hbuf[1024U] = { 0U };
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
-    uint8_t *b3 = b_.snd.snd.snd;
-    uint8_t *b2 = b_.snd.snd.fst;
-    uint8_t *b1 = b_.snd.fst;
-    uint8_t *b0 = b_.fst;
-    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
-    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
-    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
-    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
-    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
-    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
-    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U);
-    ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
-    ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U);
-    ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U);
-    ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U);
-    ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
-    ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U);
-    ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U);
-    ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U);
-    ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
-    ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U);
-    ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U);
-    ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U);
-    ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
-    ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U);
-    ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U);
-    ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U);
-    ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
-    ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U);
-    ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U);
-    ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U);
-    ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
-    ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U);
-    ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U);
-    ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U);
-    ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U);
-    Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
-    Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
-    Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
-    Lib_IntVector_Intrinsics_vec256 v30 = ws[3U];
+    memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+    Lib_IntVector_Intrinsics_vec256 v08 = ws[0U];
+    Lib_IntVector_Intrinsics_vec256 v18 = ws[1U];
+    Lib_IntVector_Intrinsics_vec256 v28 = ws[2U];
+    Lib_IntVector_Intrinsics_vec256 v38 = ws[3U];
     Lib_IntVector_Intrinsics_vec256
-    v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10);
+    v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18);
     Lib_IntVector_Intrinsics_vec256
-    v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10);
+    v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18);
     Lib_IntVector_Intrinsics_vec256
-    v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30);
+    v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38);
     Lib_IntVector_Intrinsics_vec256
-    v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30);
+    v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38);
     Lib_IntVector_Intrinsics_vec256
-    v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_);
+    v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7);
     Lib_IntVector_Intrinsics_vec256
-    v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_);
+    v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7);
     Lib_IntVector_Intrinsics_vec256
-    v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_);
+    v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7);
     Lib_IntVector_Intrinsics_vec256
-    v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_);
-    Lib_IntVector_Intrinsics_vec256 ws0 = v0__;
-    Lib_IntVector_Intrinsics_vec256 ws1 = v2__;
-    Lib_IntVector_Intrinsics_vec256 ws2 = v1__;
-    Lib_IntVector_Intrinsics_vec256 ws3 = v3__;
-    Lib_IntVector_Intrinsics_vec256 v01 = ws[4U];
-    Lib_IntVector_Intrinsics_vec256 v11 = ws[5U];
-    Lib_IntVector_Intrinsics_vec256 v21 = ws[6U];
-    Lib_IntVector_Intrinsics_vec256 v31 = ws[7U];
+    v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7);
+    Lib_IntVector_Intrinsics_vec256 ws0 = v0__7;
+    Lib_IntVector_Intrinsics_vec256 ws1 = v2__7;
+    Lib_IntVector_Intrinsics_vec256 ws2 = v1__7;
+    Lib_IntVector_Intrinsics_vec256 ws3 = v3__7;
+    Lib_IntVector_Intrinsics_vec256 v09 = ws[4U];
+    Lib_IntVector_Intrinsics_vec256 v19 = ws[5U];
+    Lib_IntVector_Intrinsics_vec256 v29 = ws[6U];
+    Lib_IntVector_Intrinsics_vec256 v39 = ws[7U];
     Lib_IntVector_Intrinsics_vec256
-    v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11);
+    v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19);
     Lib_IntVector_Intrinsics_vec256
-    v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11);
+    v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19);
     Lib_IntVector_Intrinsics_vec256
-    v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31);
+    v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39);
     Lib_IntVector_Intrinsics_vec256
-    v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31);
+    v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39);
     Lib_IntVector_Intrinsics_vec256
-    v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0);
+    v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8);
     Lib_IntVector_Intrinsics_vec256
-    v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0);
+    v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8);
     Lib_IntVector_Intrinsics_vec256
-    v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0);
+    v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8);
     Lib_IntVector_Intrinsics_vec256
-    v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0);
-    Lib_IntVector_Intrinsics_vec256 ws4 = v0__0;
-    Lib_IntVector_Intrinsics_vec256 ws5 = v2__0;
-    Lib_IntVector_Intrinsics_vec256 ws6 = v1__0;
-    Lib_IntVector_Intrinsics_vec256 ws7 = v3__0;
-    Lib_IntVector_Intrinsics_vec256 v02 = ws[8U];
-    Lib_IntVector_Intrinsics_vec256 v12 = ws[9U];
-    Lib_IntVector_Intrinsics_vec256 v22 = ws[10U];
-    Lib_IntVector_Intrinsics_vec256 v32 = ws[11U];
+    v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8);
+    Lib_IntVector_Intrinsics_vec256 ws4 = v0__8;
+    Lib_IntVector_Intrinsics_vec256 ws5 = v2__8;
+    Lib_IntVector_Intrinsics_vec256 ws6 = v1__8;
+    Lib_IntVector_Intrinsics_vec256 ws7 = v3__8;
+    Lib_IntVector_Intrinsics_vec256 v010 = ws[8U];
+    Lib_IntVector_Intrinsics_vec256 v110 = ws[9U];
+    Lib_IntVector_Intrinsics_vec256 v210 = ws[10U];
+    Lib_IntVector_Intrinsics_vec256 v310 = ws[11U];
     Lib_IntVector_Intrinsics_vec256
-    v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
+    v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110);
     Lib_IntVector_Intrinsics_vec256
-    v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
+    v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110);
     Lib_IntVector_Intrinsics_vec256
-    v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
+    v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310);
     Lib_IntVector_Intrinsics_vec256
-    v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
+    v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310);
     Lib_IntVector_Intrinsics_vec256
-    v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
+    v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9);
     Lib_IntVector_Intrinsics_vec256
-    v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
+    v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9);
     Lib_IntVector_Intrinsics_vec256
-    v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
+    v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9);
     Lib_IntVector_Intrinsics_vec256
-    v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
-    Lib_IntVector_Intrinsics_vec256 ws8 = v0__1;
-    Lib_IntVector_Intrinsics_vec256 ws9 = v2__1;
-    Lib_IntVector_Intrinsics_vec256 ws10 = v1__1;
-    Lib_IntVector_Intrinsics_vec256 ws11 = v3__1;
-    Lib_IntVector_Intrinsics_vec256 v03 = ws[12U];
-    Lib_IntVector_Intrinsics_vec256 v13 = ws[13U];
-    Lib_IntVector_Intrinsics_vec256 v23 = ws[14U];
-    Lib_IntVector_Intrinsics_vec256 v33 = ws[15U];
+    v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9);
+    Lib_IntVector_Intrinsics_vec256 ws8 = v0__9;
+    Lib_IntVector_Intrinsics_vec256 ws9 = v2__9;
+    Lib_IntVector_Intrinsics_vec256 ws10 = v1__9;
+    Lib_IntVector_Intrinsics_vec256 ws11 = v3__9;
+    Lib_IntVector_Intrinsics_vec256 v011 = ws[12U];
+    Lib_IntVector_Intrinsics_vec256 v111 = ws[13U];
+    Lib_IntVector_Intrinsics_vec256 v211 = ws[14U];
+    Lib_IntVector_Intrinsics_vec256 v311 = ws[15U];
     Lib_IntVector_Intrinsics_vec256
-    v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
+    v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111);
     Lib_IntVector_Intrinsics_vec256
-    v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
+    v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111);
     Lib_IntVector_Intrinsics_vec256
-    v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
+    v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311);
     Lib_IntVector_Intrinsics_vec256
-    v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
+    v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311);
     Lib_IntVector_Intrinsics_vec256
-    v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
+    v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10);
     Lib_IntVector_Intrinsics_vec256
-    v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
+    v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10);
     Lib_IntVector_Intrinsics_vec256
-    v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
+    v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10);
     Lib_IntVector_Intrinsics_vec256
-    v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
-    Lib_IntVector_Intrinsics_vec256 ws12 = v0__2;
-    Lib_IntVector_Intrinsics_vec256 ws13 = v2__2;
-    Lib_IntVector_Intrinsics_vec256 ws14 = v1__2;
-    Lib_IntVector_Intrinsics_vec256 ws15 = v3__2;
-    Lib_IntVector_Intrinsics_vec256 v04 = ws[16U];
-    Lib_IntVector_Intrinsics_vec256 v14 = ws[17U];
-    Lib_IntVector_Intrinsics_vec256 v24 = ws[18U];
-    Lib_IntVector_Intrinsics_vec256 v34 = ws[19U];
+    v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10);
+    Lib_IntVector_Intrinsics_vec256 ws12 = v0__10;
+    Lib_IntVector_Intrinsics_vec256 ws13 = v2__10;
+    Lib_IntVector_Intrinsics_vec256 ws14 = v1__10;
+    Lib_IntVector_Intrinsics_vec256 ws15 = v3__10;
+    Lib_IntVector_Intrinsics_vec256 v012 = ws[16U];
+    Lib_IntVector_Intrinsics_vec256 v112 = ws[17U];
+    Lib_IntVector_Intrinsics_vec256 v212 = ws[18U];
+    Lib_IntVector_Intrinsics_vec256 v312 = ws[19U];
     Lib_IntVector_Intrinsics_vec256
-    v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
+    v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112);
     Lib_IntVector_Intrinsics_vec256
-    v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
+    v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112);
     Lib_IntVector_Intrinsics_vec256
-    v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
+    v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312);
     Lib_IntVector_Intrinsics_vec256
-    v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34);
+    v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312);
     Lib_IntVector_Intrinsics_vec256
-    v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3);
+    v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11);
     Lib_IntVector_Intrinsics_vec256
-    v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3);
+    v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11);
     Lib_IntVector_Intrinsics_vec256
-    v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3);
+    v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11);
     Lib_IntVector_Intrinsics_vec256
-    v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3);
-    Lib_IntVector_Intrinsics_vec256 ws16 = v0__3;
-    Lib_IntVector_Intrinsics_vec256 ws17 = v2__3;
-    Lib_IntVector_Intrinsics_vec256 ws18 = v1__3;
-    Lib_IntVector_Intrinsics_vec256 ws19 = v3__3;
-    Lib_IntVector_Intrinsics_vec256 v05 = ws[20U];
-    Lib_IntVector_Intrinsics_vec256 v15 = ws[21U];
-    Lib_IntVector_Intrinsics_vec256 v25 = ws[22U];
-    Lib_IntVector_Intrinsics_vec256 v35 = ws[23U];
+    v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11);
+    Lib_IntVector_Intrinsics_vec256 ws16 = v0__11;
+    Lib_IntVector_Intrinsics_vec256 ws17 = v2__11;
+    Lib_IntVector_Intrinsics_vec256 ws18 = v1__11;
+    Lib_IntVector_Intrinsics_vec256 ws19 = v3__11;
+    Lib_IntVector_Intrinsics_vec256 v013 = ws[20U];
+    Lib_IntVector_Intrinsics_vec256 v113 = ws[21U];
+    Lib_IntVector_Intrinsics_vec256 v213 = ws[22U];
+    Lib_IntVector_Intrinsics_vec256 v313 = ws[23U];
    Lib_IntVector_Intrinsics_vec256
-    v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15);
+    v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113);
     Lib_IntVector_Intrinsics_vec256
-    v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15);
+    v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113);
     Lib_IntVector_Intrinsics_vec256
-    v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35);
+    v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313);
     Lib_IntVector_Intrinsics_vec256
-    v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35);
+    v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313);
     Lib_IntVector_Intrinsics_vec256
-    v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4);
+    v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12);
     Lib_IntVector_Intrinsics_vec256
-    v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4);
+    v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12);
     Lib_IntVector_Intrinsics_vec256
-    v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4);
+    v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12);
     Lib_IntVector_Intrinsics_vec256
-    v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
-    Lib_IntVector_Intrinsics_vec256 ws20 = v0__4;
-    Lib_IntVector_Intrinsics_vec256 ws21 = v2__4;
-    Lib_IntVector_Intrinsics_vec256 ws22 = v1__4;
-    Lib_IntVector_Intrinsics_vec256 ws23 = v3__4;
-    Lib_IntVector_Intrinsics_vec256 v06 = ws[24U];
-    Lib_IntVector_Intrinsics_vec256 v16 = ws[25U];
-    Lib_IntVector_Intrinsics_vec256 v26 = ws[26U];
-    Lib_IntVector_Intrinsics_vec256 v36 = ws[27U];
+    v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12);
+    Lib_IntVector_Intrinsics_vec256 ws20 = v0__12;
+    Lib_IntVector_Intrinsics_vec256 ws21 = v2__12;
+    Lib_IntVector_Intrinsics_vec256 ws22 = v1__12;
+    Lib_IntVector_Intrinsics_vec256 ws23 = v3__12;
+    Lib_IntVector_Intrinsics_vec256 v014 = ws[24U];
+    Lib_IntVector_Intrinsics_vec256 v114 = ws[25U];
+    Lib_IntVector_Intrinsics_vec256 v214 = ws[26U];
+    Lib_IntVector_Intrinsics_vec256 v314 = ws[27U];
     Lib_IntVector_Intrinsics_vec256
-    v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+    v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114);
     Lib_IntVector_Intrinsics_vec256
-    v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+    v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114);
     Lib_IntVector_Intrinsics_vec256
-    v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+    v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314);
     Lib_IntVector_Intrinsics_vec256
-    v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+    v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314);
     Lib_IntVector_Intrinsics_vec256
-    v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+    v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13);
     Lib_IntVector_Intrinsics_vec256
-    v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+    v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13);
     Lib_IntVector_Intrinsics_vec256
-    v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+    v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13);
     Lib_IntVector_Intrinsics_vec256
-    v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
-    Lib_IntVector_Intrinsics_vec256 ws24 = v0__5;
-    Lib_IntVector_Intrinsics_vec256 ws25 = v2__5;
-    Lib_IntVector_Intrinsics_vec256 ws26 = v1__5;
-    Lib_IntVector_Intrinsics_vec256 ws27 = v3__5;
+    v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13);
+    Lib_IntVector_Intrinsics_vec256 ws24 = v0__13;
+    Lib_IntVector_Intrinsics_vec256 ws25 = v2__13;
+    Lib_IntVector_Intrinsics_vec256 ws26 = v1__13;
+    Lib_IntVector_Intrinsics_vec256 ws27 = v3__13;
     Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
     Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
     Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
     Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
     Lib_IntVector_Intrinsics_vec256
-    v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+    v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
     Lib_IntVector_Intrinsics_vec256
-    v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+    v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
     Lib_IntVector_Intrinsics_vec256
-    v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+    v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
     Lib_IntVector_Intrinsics_vec256
-    v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+    v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
     Lib_IntVector_Intrinsics_vec256
-    v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+    v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14);
     Lib_IntVector_Intrinsics_vec256
-    v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+    v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14);
     Lib_IntVector_Intrinsics_vec256
-    v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+    v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14);
     Lib_IntVector_Intrinsics_vec256
-    v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
-    Lib_IntVector_Intrinsics_vec256 ws28 = v0__6;
-    Lib_IntVector_Intrinsics_vec256 ws29 = v2__6;
-    Lib_IntVector_Intrinsics_vec256 ws30 = v1__6;
-    Lib_IntVector_Intrinsics_vec256 ws31 = v3__6;
+    v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14);
+    Lib_IntVector_Intrinsics_vec256 ws28 = v0__14;
+    Lib_IntVector_Intrinsics_vec256 ws29 = v2__14;
+    Lib_IntVector_Intrinsics_vec256 ws30 = v1__14;
+    Lib_IntVector_Intrinsics_vec256 ws31 = v3__14;
     ws[0U] = ws0;
-    ws[1U] = ws1;
-    ws[2U] = ws2;
-    ws[3U] = ws3;
-    ws[4U] = ws4;
-    ws[5U] = ws5;
-    ws[6U] = ws6;
-    ws[7U] = ws7;
-    ws[8U] = ws8;
-    ws[9U] = ws9;
-    ws[10U] = ws10;
-    ws[11U] = ws11;
-    ws[12U] = ws12;
-    ws[13U] = ws13;
-    ws[14U] = ws14;
-    ws[15U] = ws15;
-    ws[16U] = ws16;
-    ws[17U] = ws17;
-    ws[18U] = ws18;
-    ws[19U] = ws19;
-    ws[20U] = ws20;
-    ws[21U] = ws21;
-    ws[22U] = ws22;
-    ws[23U] = ws23;
-    ws[24U] = ws24;
-    ws[25U] = ws25;
-    ws[26U] = ws26;
-    ws[27U] = ws27;
-    ws[28U] = ws28;
-    ws[29U] = ws29;
-    ws[30U] = ws30;
+    ws[1U] = ws4;
+    ws[2U] = ws8;
+    ws[3U] = ws12;
+    ws[4U] = ws16;
+    ws[5U] = ws20;
+    ws[6U] = ws24;
+    ws[7U] = ws28;
+    ws[8U] = ws1;
+    ws[9U] = ws5;
+    ws[10U] = ws9;
+    ws[11U] = ws13;
+    ws[12U] = ws17;
+    ws[13U] = ws21;
+    ws[14U] = ws25;
+    ws[15U] = ws29;
+    ws[16U] = ws2;
+    ws[17U] = ws6;
+    ws[18U] = ws10;
+    ws[19U] = ws14;
+    ws[20U] = ws18;
+    ws[21U] = ws22;
+    ws[22U] = ws26;
+    ws[23U] = ws30;
+    ws[24U] = ws3;
+    ws[25U] = ws7;
+    ws[26U] = ws11;
+    ws[27U] = ws15;
+    ws[28U] = ws19;
+    ws[29U] = ws23;
+    ws[30U] = ws27;
     ws[31U] = ws31;
-    for (uint32_t i = 0U; i < 25U; i++)
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]);
+      Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
     }
+    uint8_t *b35 = rb.snd.snd.snd;
+    uint8_t *b2 = rb.snd.snd.fst;
+    uint8_t *b1 = rb.snd.fst;
+    uint8_t *b0 = rb.fst;
+    memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t));
+    memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t));
+    memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t));
+    memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t));
     for (uint32_t i1 = 0U; i1 < 24U; i1++)
     {
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
@@ -8622,28 +3826,28 @@ Hacl_Hash_SHA3_Simd256_sha3_512(
         Lib_IntVector_Intrinsics_vec256
         uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
         Lib_IntVector_Intrinsics_vec256
-        v07 =
+        v015 =
          Lib_IntVector_Intrinsics_vec256_xor(uu____6,
            Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i]));
        Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i];
        Lib_IntVector_Intrinsics_vec256
        uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
        Lib_IntVector_Intrinsics_vec256
-        v17 =
+        v115 =
          Lib_IntVector_Intrinsics_vec256_xor(uu____8,
            Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i]));
        Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i];
        Lib_IntVector_Intrinsics_vec256
        uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
        Lib_IntVector_Intrinsics_vec256
-        v27 =
+        v215 =
          Lib_IntVector_Intrinsics_vec256_xor(uu____10,
            Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i]));
        Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i];
        Lib_IntVector_Intrinsics_vec256
        uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
        Lib_IntVector_Intrinsics_vec256
-        v37 =
+        v315 =
          Lib_IntVector_Intrinsics_vec256_xor(uu____12,
            Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i]));
        Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i];
@@ -8653,10 +3857,10 @@ Hacl_Hash_SHA3_Simd256_sha3_512(
          v4 =
          Lib_IntVector_Intrinsics_vec256_xor(uu____14,
            Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i]));
-        s[0U + 5U * i] = v07;
-        s[1U + 5U * i] = v17;
-        s[2U + 5U * i] = v27;
-        s[3U + 5U * i] = v37;
+        s[0U + 5U * i] = v015;
+        s[1U + 5U * i] = v115;
+        s[2U + 5U * i] = v215;
+        s[3U + 5U * i] = v315;
        s[4U + 5U * i] = v4;);
      uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1];
      Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U];
@@ -8665,11 +3869,294 @@ Hacl_Hash_SHA3_Simd256_sha3_512(
          Lib_IntVector_Intrinsics_vec256_load64(c));
    }
  }
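Squeeze schedule for the code below: each instance produces a 32-byte digest while rateInBytes1 is 136U, so 32U / rateInBytes1 is zero, the block loop above never runs, and the whole digest comes from the remOut tail that follows; a single transpose-and-store of the state is enough. Spelled out (illustrative only):

    uint32_t blocks = 32U / 136U;   /* 0 full rate-sized output blocks */
    uint32_t remOut = 32U % 136U;   /* 32 bytes, taken from the front of hbuf */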
+  uint32_t remOut = 32U % rateInBytes1;
+  uint8_t hbuf[1024U] = { 0U };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+  memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 v08 = ws[0U];
+  Lib_IntVector_Intrinsics_vec256 v18 = ws[1U];
+  Lib_IntVector_Intrinsics_vec256 v28 = ws[2U];
+  Lib_IntVector_Intrinsics_vec256 v38 = ws[3U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256
+  v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256
+  v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38);
+  Lib_IntVector_Intrinsics_vec256
+  v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38);
+  Lib_IntVector_Intrinsics_vec256
+  v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7);
+  Lib_IntVector_Intrinsics_vec256
+  v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7);
+  Lib_IntVector_Intrinsics_vec256
+  v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7);
+  Lib_IntVector_Intrinsics_vec256
+  v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7);
+  Lib_IntVector_Intrinsics_vec256 ws0 = v0__7;
+  Lib_IntVector_Intrinsics_vec256 ws1 = v2__7;
+  Lib_IntVector_Intrinsics_vec256 ws2 = v1__7;
+  Lib_IntVector_Intrinsics_vec256 ws3 = v3__7;
+  Lib_IntVector_Intrinsics_vec256 v09 = ws[4U];
+  Lib_IntVector_Intrinsics_vec256 v19 = ws[5U];
+  Lib_IntVector_Intrinsics_vec256 v29 = ws[6U];
+  Lib_IntVector_Intrinsics_vec256 v39 = ws[7U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19);
+  Lib_IntVector_Intrinsics_vec256
+  v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19);
+  Lib_IntVector_Intrinsics_vec256
+  v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39);
+  Lib_IntVector_Intrinsics_vec256
+  v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39);
+  Lib_IntVector_Intrinsics_vec256
+  v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8);
+  Lib_IntVector_Intrinsics_vec256
+  v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8);
+  Lib_IntVector_Intrinsics_vec256
+  v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8);
+  Lib_IntVector_Intrinsics_vec256
+  v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8);
+  Lib_IntVector_Intrinsics_vec256 ws4 = v0__8;
+  Lib_IntVector_Intrinsics_vec256 ws5 = v2__8;
+  Lib_IntVector_Intrinsics_vec256 ws6 = v1__8;
+  Lib_IntVector_Intrinsics_vec256 ws7 = v3__8;
+  Lib_IntVector_Intrinsics_vec256 v010 = ws[8U];
+  Lib_IntVector_Intrinsics_vec256 v110 = ws[9U];
+  Lib_IntVector_Intrinsics_vec256 v210 = ws[10U];
+  Lib_IntVector_Intrinsics_vec256 v310 = ws[11U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110);
+  Lib_IntVector_Intrinsics_vec256
+  v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110);
+  Lib_IntVector_Intrinsics_vec256
+  v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310);
+  Lib_IntVector_Intrinsics_vec256
+  v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310);
+  Lib_IntVector_Intrinsics_vec256
+  v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9);
+  Lib_IntVector_Intrinsics_vec256
+  v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9);
+  Lib_IntVector_Intrinsics_vec256
+  v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9);
+  Lib_IntVector_Intrinsics_vec256
+  v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9);
+  Lib_IntVector_Intrinsics_vec256 ws8 = v0__9;
+  Lib_IntVector_Intrinsics_vec256 ws9 = v2__9;
+  Lib_IntVector_Intrinsics_vec256 ws10 = v1__9;
+  Lib_IntVector_Intrinsics_vec256 ws11 = v3__9;
+  Lib_IntVector_Intrinsics_vec256 v011 = ws[12U];
+  Lib_IntVector_Intrinsics_vec256 v111 = ws[13U];
+  Lib_IntVector_Intrinsics_vec256 v211 = ws[14U];
+  Lib_IntVector_Intrinsics_vec256 v311 = ws[15U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111);
+  Lib_IntVector_Intrinsics_vec256
+  v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111);
+  Lib_IntVector_Intrinsics_vec256
+  v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311);
+  Lib_IntVector_Intrinsics_vec256
+  v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311);
+  Lib_IntVector_Intrinsics_vec256
+  v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10);
+  Lib_IntVector_Intrinsics_vec256
+  v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10);
+  Lib_IntVector_Intrinsics_vec256
+  v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10);
+  Lib_IntVector_Intrinsics_vec256
+  v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10);
+  Lib_IntVector_Intrinsics_vec256 ws12 = v0__10;
+  Lib_IntVector_Intrinsics_vec256 ws13 = v2__10;
+  Lib_IntVector_Intrinsics_vec256 ws14 = v1__10;
+  Lib_IntVector_Intrinsics_vec256 ws15 = v3__10;
+  Lib_IntVector_Intrinsics_vec256 v012 = ws[16U];
+  Lib_IntVector_Intrinsics_vec256 v112 = ws[17U];
+  Lib_IntVector_Intrinsics_vec256 v212 = ws[18U];
+  Lib_IntVector_Intrinsics_vec256 v312 = ws[19U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112);
+  Lib_IntVector_Intrinsics_vec256
+  v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112);
+  Lib_IntVector_Intrinsics_vec256
+  v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312);
+  Lib_IntVector_Intrinsics_vec256
+  v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312);
+  Lib_IntVector_Intrinsics_vec256
+  v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11);
+  Lib_IntVector_Intrinsics_vec256
+  v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11);
+  Lib_IntVector_Intrinsics_vec256
+  v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11);
+  Lib_IntVector_Intrinsics_vec256
+  v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11);
+  Lib_IntVector_Intrinsics_vec256 ws16 = v0__11;
+  Lib_IntVector_Intrinsics_vec256 ws17 = v2__11;
+  Lib_IntVector_Intrinsics_vec256 ws18 = v1__11;
+  Lib_IntVector_Intrinsics_vec256 ws19 = v3__11;
+  Lib_IntVector_Intrinsics_vec256 v013 = ws[20U];
+  Lib_IntVector_Intrinsics_vec256 v113 = ws[21U];
+  Lib_IntVector_Intrinsics_vec256 v213 = ws[22U];
+  Lib_IntVector_Intrinsics_vec256 v313 = ws[23U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113);
+  Lib_IntVector_Intrinsics_vec256
+  v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113);
+  Lib_IntVector_Intrinsics_vec256
+  v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313);
+  Lib_IntVector_Intrinsics_vec256
+  v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313);
+  Lib_IntVector_Intrinsics_vec256
+  v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12);
+  Lib_IntVector_Intrinsics_vec256
+  v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12);
+  Lib_IntVector_Intrinsics_vec256
+  v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12);
+  Lib_IntVector_Intrinsics_vec256
+  v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12);
+  Lib_IntVector_Intrinsics_vec256 ws20 = v0__12;
+  Lib_IntVector_Intrinsics_vec256 ws21 = v2__12;
+  Lib_IntVector_Intrinsics_vec256 ws22 = v1__12;
+  Lib_IntVector_Intrinsics_vec256 ws23 = v3__12;
+  Lib_IntVector_Intrinsics_vec256 v014 = ws[24U];
+  Lib_IntVector_Intrinsics_vec256 v114 = ws[25U];
+  Lib_IntVector_Intrinsics_vec256 v214 = ws[26U];
+  Lib_IntVector_Intrinsics_vec256 v314 = ws[27U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114);
+  Lib_IntVector_Intrinsics_vec256
+  v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114);
+  Lib_IntVector_Intrinsics_vec256
+  v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314);
+  Lib_IntVector_Intrinsics_vec256
+  v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314);
+  Lib_IntVector_Intrinsics_vec256
+  v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13);
+  Lib_IntVector_Intrinsics_vec256
+  v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13);
+  Lib_IntVector_Intrinsics_vec256
+  v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13);
+  Lib_IntVector_Intrinsics_vec256
+  v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13);
+  Lib_IntVector_Intrinsics_vec256 ws24 = v0__13;
+  Lib_IntVector_Intrinsics_vec256 ws25 = v2__13;
+  Lib_IntVector_Intrinsics_vec256 ws26 = v1__13;
+  Lib_IntVector_Intrinsics_vec256 ws27 = v3__13;
+  Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
+  Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
+  Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
+  Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+  Lib_IntVector_Intrinsics_vec256
+  v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+  Lib_IntVector_Intrinsics_vec256
+  v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+  Lib_IntVector_Intrinsics_vec256
+  v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+  Lib_IntVector_Intrinsics_vec256
+  v0__14 =
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 32U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 32U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 32U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_384( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + Hacl_Hash_SHA2_uint8_4p + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + Hacl_Hash_SHA2_uint8_4p + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 104U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); + } uint8_t b00[256U] = { 0U }; uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % rateInBytes1; uint8_t *b31 = ib.snd.snd.snd; @@ -8957,61 +4444,367 @@ Hacl_Hash_SHA3_Simd256_sha3_512( { s[i] = 
Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); } - uint8_t b04[256U] = { 0U }; - uint8_t b14[256U] = { 0U }; - uint8_t b24[256U] = { 0U }; - uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; - uint8_t *b35 = b.snd.snd.snd; - uint8_t *b25 = b.snd.snd.fst; - uint8_t *b15 = b.snd.fst; - uint8_t *b05 = b.fst; - b05[rateInBytes1 - 1U] = 0x80U; - b15[rateInBytes1 - 1U] = 0x80U; - b25[rateInBytes1 - 1U] = 0x80U; - b35[rateInBytes1 - 1U] = 0x80U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b.snd.snd.snd; - uint8_t *b26 = b.snd.snd.fst; - uint8_t *b16 = b.snd.fst; - uint8_t *b06 = b.fst; - ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); - ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); - ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); - ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); - ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); - ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); - ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); - ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); - ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); - ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); - ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); - ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); - ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); - ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); - ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); - ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); - ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); - ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); - ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); - ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); - ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); - ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); - ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); - ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); - ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; - Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; - Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; - Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 
0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + 
Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, 
v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] 
= ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; + uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = 
Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 48U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; Lib_IntVector_Intrinsics_vec256 v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); Lib_IntVector_Intrinsics_vec256 @@ -9028,14 +4821,14 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); Lib_IntVector_Intrinsics_vec256 v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; - Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; - Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; - Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; - Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; - Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; - Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; - Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; Lib_IntVector_Intrinsics_vec256 v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); Lib_IntVector_Intrinsics_vec256 @@ -9052,14 +4845,14 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); Lib_IntVector_Intrinsics_vec256 v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; - Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; - Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; - Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; - Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; - Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; - Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; - Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + 
Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; Lib_IntVector_Intrinsics_vec256 v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); Lib_IntVector_Intrinsics_vec256 @@ -9076,14 +4869,14 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); Lib_IntVector_Intrinsics_vec256 v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; - Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; - Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; - Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; - Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; - Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; - Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; - Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; Lib_IntVector_Intrinsics_vec256 v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); Lib_IntVector_Intrinsics_vec256 @@ -9100,14 +4893,14 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); Lib_IntVector_Intrinsics_vec256 v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; - Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; - Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; - Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; - Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; - Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; - Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; - Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; Lib_IntVector_Intrinsics_vec256 v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); Lib_IntVector_Intrinsics_vec256 @@ -9124,14 +4917,14 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); Lib_IntVector_Intrinsics_vec256 v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; - Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; - Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; - Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; - Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; - Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; - Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; - Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = 
ws[23U]; Lib_IntVector_Intrinsics_vec256 v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); Lib_IntVector_Intrinsics_vec256 @@ -9148,14 +4941,14 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); Lib_IntVector_Intrinsics_vec256 v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; - Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; - Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; - Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; - Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; - Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; - Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; - Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; Lib_IntVector_Intrinsics_vec256 v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); Lib_IntVector_Intrinsics_vec256 @@ -9172,22 +4965,22 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); Lib_IntVector_Intrinsics_vec256 v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; - Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; - Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; - Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; - Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; - Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; - Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; - Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; Lib_IntVector_Intrinsics_vec256 - v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); Lib_IntVector_Intrinsics_vec256 v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 @@ -9196,1049 +4989,645 @@ Hacl_Hash_SHA3_Simd256_sha3_512( v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); Lib_IntVector_Intrinsics_vec256 v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; - Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; - 
Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; - Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; - ws34[0U] = ws01; - ws34[1U] = ws112; - ws34[2U] = ws212; - ws34[3U] = ws35; - ws34[4U] = ws41; - ws34[5U] = ws51; - ws34[6U] = ws61; - ws34[7U] = ws71; - ws34[8U] = ws81; - ws34[9U] = ws91; - ws34[10U] = ws101; - ws34[11U] = ws113; - ws34[12U] = ws121; - ws34[13U] = ws131; - ws34[14U] = ws141; - ws34[15U] = ws151; - ws34[16U] = ws161; - ws34[17U] = ws171; - ws34[18U] = ws181; - ws34[19U] = ws191; - ws34[20U] = ws201; - ws34[21U] = ws213; - ws34[22U] = ws221; - ws34[23U] = ws231; - ws34[24U] = ws241; - ws34[25U] = ws251; - ws34[26U] = ws261; - ws34[27U] = ws271; - ws34[28U] = ws281; - ws34[29U] = ws291; - ws34[30U] = ws301; - ws34[31U] = ws311; - for (uint32_t i = 0U; i < 25U; i++) - { - s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____17, - Lib_IntVector_Intrinsics_vec256_xor(uu____18, - Lib_IntVector_Intrinsics_vec256_xor(uu____19, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____20, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____22 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v0 = - 
Lib_IntVector_Intrinsics_vec256_xor(uu____23, - Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v1 = - Lib_IntVector_Intrinsics_vec256_xor(uu____25, - Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v2 = - Lib_IntVector_Intrinsics_vec256_xor(uu____27, - Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v3 = - Lib_IntVector_Intrinsics_vec256_xor(uu____29, - Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____31, - Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); - s[0U + 5U * i] = v0; - s[1U + 5U * i] = v1; - s[2U + 5U * i] = v2; - s[3U + 5U * i] = v3; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____33, - Lib_IntVector_Intrinsics_vec256_load64(c)); + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); } - for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++) + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 48U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 48U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 48U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_512( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + Hacl_Hash_SHA2_uint8_4p + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + Hacl_Hash_SHA2_uint8_4p + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + uint32_t rateInBytes1 = 72U; + for (uint32_t i = 0U; i < inputByteLen / rateInBytes1; i++) { - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); - Lib_IntVector_Intrinsics_vec256 - v2_15 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); - Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); - Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); - Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); - Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); - Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); - Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); - Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); - Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; - Lib_IntVector_Intrinsics_vec256 - 
v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); - Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); - Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); - Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); - Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); - Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); - Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); - Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); - Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); - Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = 
v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); - Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); - Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); - Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) - { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); - } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); - memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); - memcpy(b36 + i0 * 
rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); - for (uint32_t i1 = 0U; i1 < 24U; i1++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____34, - Lib_IntVector_Intrinsics_vec256_xor(uu____35, - Lib_IntVector_Intrinsics_vec256_xor(uu____36, - Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); - KRML_MAYBE_FOR5(i2, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____37, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = s[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____39 = current; - s[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, - r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v023 = - Lib_IntVector_Intrinsics_vec256_xor(uu____40, - Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v123 = - Lib_IntVector_Intrinsics_vec256_xor(uu____42, - Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v223 = - Lib_IntVector_Intrinsics_vec256_xor(uu____44, - Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v323 = - Lib_IntVector_Intrinsics_vec256_xor(uu____46, - Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____48, - Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); - s[0U + 5U * i] = v023; - s[1U + 5U * i] = v123; - s[2U + 5U * i] = v223; - s[3U + 5U * i] = v323; - s[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____50 
= s[0U]; - s[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____50, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b3 = ib.snd.snd.snd; + uint8_t *b2 = ib.snd.snd.fst; + uint8_t *b1 = ib.snd.fst; + uint8_t *b0 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl1, b1 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl2, b2 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + memcpy(bl3, b3 + i * rateInBytes1, rateInBytes1 * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b_, s); } - uint32_t remOut = 64U % rateInBytes1; - uint8_t hbuf[1024U] = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem = inputByteLen % rateInBytes1; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem, rem * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem, rem * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[inputByteLen % rateInBytes1] = 0x06U; + b12[inputByteLen % rateInBytes1] = 0x06U; + b22[inputByteLen % rateInBytes1] = 0x06U; + b32[inputByteLen % rateInBytes1] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); 
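+  /* Note: each Lib_IntVector_Intrinsics_vec256_load64_le reads 32 bytes, so the
+     eight loads per input buffer (offsets 0 through 224) cover the full
+     256-byte padded block of each of the four lanes. */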
+ ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; Lib_IntVector_Intrinsics_vec256 - v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); Lib_IntVector_Intrinsics_vec256 - v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); Lib_IntVector_Intrinsics_vec256 - v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); Lib_IntVector_Intrinsics_vec256 - v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); Lib_IntVector_Intrinsics_vec256 - v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); Lib_IntVector_Intrinsics_vec256 - v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); Lib_IntVector_Intrinsics_vec256 - v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); Lib_IntVector_Intrinsics_vec256 - v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; - Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + 
Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; Lib_IntVector_Intrinsics_vec256 - v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); Lib_IntVector_Intrinsics_vec256 - v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); Lib_IntVector_Intrinsics_vec256 - v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); Lib_IntVector_Intrinsics_vec256 - v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); Lib_IntVector_Intrinsics_vec256 - v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); Lib_IntVector_Intrinsics_vec256 - v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); Lib_IntVector_Intrinsics_vec256 - v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); Lib_IntVector_Intrinsics_vec256 - v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; - Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; Lib_IntVector_Intrinsics_vec256 - v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); Lib_IntVector_Intrinsics_vec256 - v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); Lib_IntVector_Intrinsics_vec256 - v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); Lib_IntVector_Intrinsics_vec256 - v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); Lib_IntVector_Intrinsics_vec256 - v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); Lib_IntVector_Intrinsics_vec256 - v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); Lib_IntVector_Intrinsics_vec256 - v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); 
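+  /* Each group of interleave_low64/high64 followed by interleave_low128/high128
+     below performs a 4x4 transpose of 64-bit words, so every resulting vector
+     holds the same Keccak state word from all four lanes. */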
Lib_IntVector_Intrinsics_vec256 - v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; - Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; - Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; Lib_IntVector_Intrinsics_vec256 - v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); Lib_IntVector_Intrinsics_vec256 - v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); Lib_IntVector_Intrinsics_vec256 - v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); Lib_IntVector_Intrinsics_vec256 - v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); Lib_IntVector_Intrinsics_vec256 - v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); Lib_IntVector_Intrinsics_vec256 - v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); Lib_IntVector_Intrinsics_vec256 - v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); Lib_IntVector_Intrinsics_vec256 - v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; - Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; Lib_IntVector_Intrinsics_vec256 - v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); Lib_IntVector_Intrinsics_vec256 - v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); 
Lib_IntVector_Intrinsics_vec256 - v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); Lib_IntVector_Intrinsics_vec256 - v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); Lib_IntVector_Intrinsics_vec256 - v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); Lib_IntVector_Intrinsics_vec256 - v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); Lib_IntVector_Intrinsics_vec256 - v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); Lib_IntVector_Intrinsics_vec256 - v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; - Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; Lib_IntVector_Intrinsics_vec256 - v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); Lib_IntVector_Intrinsics_vec256 - v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); Lib_IntVector_Intrinsics_vec256 - v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); Lib_IntVector_Intrinsics_vec256 - v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); Lib_IntVector_Intrinsics_vec256 - v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); Lib_IntVector_Intrinsics_vec256 - v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); Lib_IntVector_Intrinsics_vec256 - v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); Lib_IntVector_Intrinsics_vec256 - v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; - Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 
v222 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); Lib_IntVector_Intrinsics_vec256 - v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); Lib_IntVector_Intrinsics_vec256 - v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); Lib_IntVector_Intrinsics_vec256 - v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); Lib_IntVector_Intrinsics_vec256 - v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; - Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; Lib_IntVector_Intrinsics_vec256 - v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); Lib_IntVector_Intrinsics_vec256 - v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); Lib_IntVector_Intrinsics_vec256 - v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); Lib_IntVector_Intrinsics_vec256 - v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); Lib_IntVector_Intrinsics_vec256 - v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + v0__6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); Lib_IntVector_Intrinsics_vec256 - v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); Lib_IntVector_Intrinsics_vec256 - v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); Lib_IntVector_Intrinsics_vec256 - v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; - ws[0U] = ws0; - ws[1U] = ws4; - ws[2U] = ws8; - ws[3U] = ws12; - ws[4U] = ws16; - ws[5U] = ws20; - ws[6U] = ws24; - ws[7U] = ws28; - ws[8U] = ws1; - ws[9U] = ws5; - ws[10U] = ws9; - ws[11U] = ws13; - ws[12U] = ws17; - ws[13U] = ws21; - ws[14U] = ws25; - ws[15U] = ws29; - ws[16U] = ws2; - ws[17U] = ws6; - ws[18U] = ws10; - ws[19U] = ws14; - ws[20U] = ws18; - ws[21U] = ws22; - ws[22U] = ws26; - ws[23U] = ws30; - ws[24U] = ws3; - ws[25U] = ws7; - ws[26U] = ws11; - ws[27U] = ws15; - ws[28U] = ws19; - ws[29U] = ws23; - ws[30U] = ws27; - ws[31U] = ws31; - for (uint32_t i = 0U; i < 32U; i++) + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) { - Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); } - uint8_t *b36 = rb.snd.snd.snd; - uint8_t *b2 = rb.snd.snd.fst; - uint8_t *b1 = rb.snd.fst; - uint8_t *b0 = rb.fst; - memcpy(b0 + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); - memcpy(b1 + 64U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); - memcpy(b2 + 64U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); - memcpy(b36 + 64U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); -} - -/** -Allocate quadruple state buffer (200-bytes for each) -*/ -Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void) -{ - Lib_IntVector_Intrinsics_vec256 - *buf = - (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, - sizeof (Lib_IntVector_Intrinsics_vec256) * 25U); - memset(buf, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - return buf; -} - -/** -Free quadruple state buffer -*/ -void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s) -{ - KRML_ALIGNED_FREE(s); -} - -/** -Absorb number of blocks of 4 input buffers and write the output states - - This function is intended to receive a quadruple hash state and 4 input buffers. 
- It prcoesses an inputs of multiple of 168-bytes (SHAKE128 block size), - any additional bytes of final partial block for each buffer are ignored. - - The argument `state` (IN/OUT) points to quadruple hash state, - i.e., Lib_IntVector_Intrinsics_vec256[25] - The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes - of valid memory for each buffer, i.e., uint8_t[inputByteLen] -*/ -void -Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( - Lib_IntVector_Intrinsics_vec256 *state, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint32_t inputByteLen -) -{ - for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) - { - uint8_t b00[256U] = { 0U }; - uint8_t b10[256U] = { 0U }; - uint8_t b20[256U] = { 0U }; - uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ - b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; - uint8_t *b01 = input0; - uint8_t *b11 = input1; - uint8_t *b21 = input2; - uint8_t *b31 = input3; - uint8_t *bl3 = b_.snd.snd.snd; - uint8_t *bl2 = b_.snd.snd.fst; - uint8_t *bl1 = b_.snd.fst; - uint8_t *bl0 = b_.fst; - memcpy(bl0, b01 + i0 * 168U, 168U * sizeof (uint8_t)); - memcpy(bl1, b11 + i0 * 168U, 168U * sizeof (uint8_t)); - memcpy(bl2, b21 + i0 * 168U, 168U * sizeof (uint8_t)); - memcpy(bl3, b31 + i0 * 168U, 168U * sizeof (uint8_t)); + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes1 - 1U] = 0x80U; + b15[rateInBytes1 - 1U] = 0x80U; + b25[rateInBytes1 - 1U] = 0x80U; + b3[rateInBytes1 - 1U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(rateInBytes1, b, s); + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes1; i0++) + { + uint8_t hbuf[1024U] = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3 = b_.snd.snd.snd; - uint8_t *b2 = b_.snd.snd.fst; - uint8_t *b1 = b_.snd.fst; - uint8_t *b0 = b_.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); - ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws[21U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__; - Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; - Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; - Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; - Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); Lib_IntVector_Intrinsics_vec256 - v1_0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; - Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; - Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; - Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; - Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); Lib_IntVector_Intrinsics_vec256 - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); Lib_IntVector_Intrinsics_vec256 - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); Lib_IntVector_Intrinsics_vec256 - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); Lib_IntVector_Intrinsics_vec256 - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; - Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; - 
Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; - Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; - Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); Lib_IntVector_Intrinsics_vec256 - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); Lib_IntVector_Intrinsics_vec256 - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; - Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; - Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; - Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; - Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; Lib_IntVector_Intrinsics_vec256 - v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); Lib_IntVector_Intrinsics_vec256 - v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); Lib_IntVector_Intrinsics_vec256 - v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); Lib_IntVector_Intrinsics_vec256 - v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); Lib_IntVector_Intrinsics_vec256 - v0__3 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); Lib_IntVector_Intrinsics_vec256 - v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); Lib_IntVector_Intrinsics_vec256 - v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); Lib_IntVector_Intrinsics_vec256 - v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; - Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; - Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; - Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; - Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); Lib_IntVector_Intrinsics_vec256 - v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); Lib_IntVector_Intrinsics_vec256 - v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); Lib_IntVector_Intrinsics_vec256 - v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); Lib_IntVector_Intrinsics_vec256 - v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; - Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; - Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; - Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; - Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + 
Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; Lib_IntVector_Intrinsics_vec256 - v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); Lib_IntVector_Intrinsics_vec256 - v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); Lib_IntVector_Intrinsics_vec256 - v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); Lib_IntVector_Intrinsics_vec256 - v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); Lib_IntVector_Intrinsics_vec256 - v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); Lib_IntVector_Intrinsics_vec256 - v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); Lib_IntVector_Intrinsics_vec256 - v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); Lib_IntVector_Intrinsics_vec256 - v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 - v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); Lib_IntVector_Intrinsics_vec256 - v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); Lib_IntVector_Intrinsics_vec256 - v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws29 
= v2__6; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - ws[16U] = ws16; - ws[17U] = ws17; - ws[18U] = ws18; - ws[19U] = ws19; - ws[20U] = ws20; - ws[21U] = ws21; - ws[22U] = ws22; - ws[23U] = ws23; - ws[24U] = ws24; - ws[25U] = ws25; - ws[26U] = ws26; - ws[27U] = ws27; - ws[28U] = ws28; - ws[29U] = ws29; - ws[30U] = ws30; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; ws[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) + for (uint32_t i = 0U; i < 32U; i++) { - state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes1, hbuf, rateInBytes1 * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes1, hbuf + 256U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes1, hbuf + 512U, rateInBytes1 * sizeof (uint8_t)); + memcpy(b35 + i0 * rateInBytes1, hbuf + 768U, rateInBytes1 * sizeof (uint8_t)); for (uint32_t i1 = 0U; i1 < 24U; i1++) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; @@ -10246,14 +5635,14 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( 0U, 5U, 1U, - Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; _C[i] = Lib_IntVector_Intrinsics_vec256_xor(uu____0, Lib_IntVector_Intrinsics_vec256_xor(uu____1, Lib_IntVector_Intrinsics_vec256_xor(uu____2, - Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); KRML_MAYBE_FOR5(i2, 0U, 5U, @@ -10270,16 +5659,16 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( 0U, 5U, 1U, - state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = state[1U]; + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; Lib_IntVector_Intrinsics_vec256 current = x; for (uint32_t i = 0U; i < 24U; i++) { uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + 
Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; Lib_IntVector_Intrinsics_vec256 uu____5 = current; - state[_Y] = + s[_Y] = Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); @@ -10289,53 +5678,360 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( 0U, 5U, 1U, - Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; Lib_IntVector_Intrinsics_vec256 - uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v07 = + v015 = Lib_IntVector_Intrinsics_vec256_xor(uu____6, - Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; Lib_IntVector_Intrinsics_vec256 - uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v17 = + v115 = Lib_IntVector_Intrinsics_vec256_xor(uu____8, - Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; Lib_IntVector_Intrinsics_vec256 - uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v27 = + v215 = Lib_IntVector_Intrinsics_vec256_xor(uu____10, - Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; Lib_IntVector_Intrinsics_vec256 - uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); Lib_IntVector_Intrinsics_vec256 - v37 = + v315 = Lib_IntVector_Intrinsics_vec256_xor(uu____12, - Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; Lib_IntVector_Intrinsics_vec256 - uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); Lib_IntVector_Intrinsics_vec256 v4 = Lib_IntVector_Intrinsics_vec256_xor(uu____14, - Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); - state[0U + 5U * i] = v07; - state[1U + 5U * i] = v17; - state[2U + 5U * i] = v27; - state[3U + 5U * i] = v37; - state[4U + 5U * i] = v4;); + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v015; + s[1U + 5U * i] = v115; + s[2U + 5U * i] = v215; + s[3U + 5U * i] = v315; + s[4U + 5U * i] = v4;); uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i1]; - Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; - state[0U] = + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = Lib_IntVector_Intrinsics_vec256_xor(uu____16, Lib_IntVector_Intrinsics_vec256_load64(c)); } } + uint32_t remOut = 
64U % rateInBytes1; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v08 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + 
v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; 
+ ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b35 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 64U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 64U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b35 + 64U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +/** +Allocate a quadruple state buffer (200 bytes for each state) +*/ +Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void) +{ + Lib_IntVector_Intrinsics_vec256 + *buf = + (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, + sizeof (Lib_IntVector_Intrinsics_vec256) * 25U); + memset(buf, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + return buf; +} + +/** +Free a quadruple state buffer +*/ +void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s) +{ + KRML_ALIGNED_FREE(s); +} + +/** +Absorb a number of blocks from 4 input buffers and write the output states + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes the input in full blocks of 168 bytes (the SHAKE128 block size); + any remaining bytes of the final partial block of each buffer are ignored. + + The argument `state` (IN/OUT) points to a quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + for (uint32_t i = 0U; i < inputByteLen / 168U; i++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + Hacl_Hash_SHA2_uint8_4p + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b0 = input0; + uint8_t *b1 = input1; + uint8_t *b2 = input2; + uint8_t *b3 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b0 + i * 168U, 168U * sizeof (uint8_t)); + memcpy(bl1, b1 + i * 168U, 168U * sizeof (uint8_t)); + memcpy(bl2, b2 + i * 168U, 168U * sizeof (uint8_t)); + memcpy(bl3, b3 + i * 168U, 168U * sizeof (uint8_t)); + Hacl_Hash_SHA3_Simd256_absorb_inner_256(168U, b_, state); + } } /** @@ -10368,7 +6064,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( uint8_t b10[256U] = { 0U }; uint8_t b20[256U] = { 0U }; uint8_t b30[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; uint32_t rem = inputByteLen % 168U; uint8_t *b01 = input0; @@ -10448,10 +6144,10 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); Lib_IntVector_Intrinsics_vec256 v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 ws00 = v0__; - Lib_IntVector_Intrinsics_vec256 ws110 = v2__; - Lib_IntVector_Intrinsics_vec256 ws210 = v1__; - Lib_IntVector_Intrinsics_vec256 ws32 = v3__; + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; +
Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; @@ -10472,10 +6168,10 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); Lib_IntVector_Intrinsics_vec256 v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; - Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; - Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; - Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; @@ -10496,10 +6192,10 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); Lib_IntVector_Intrinsics_vec256 v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; - Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; - Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; - Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; @@ -10517,13 +6213,13 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( Lib_IntVector_Intrinsics_vec256 v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); Lib_IntVector_Intrinsics_vec256 - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; - Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; - Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; - Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; @@ -10544,10 +6240,10 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); Lib_IntVector_Intrinsics_vec256 v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); - Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; - Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; - Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; - Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; 
Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; @@ -10568,10 +6264,10 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); Lib_IntVector_Intrinsics_vec256 v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); - Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; - Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; - Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; - Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; @@ -10592,22 +6288,22 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); Lib_IntVector_Intrinsics_vec256 v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; - Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; - Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; - Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; - Lib_IntVector_Intrinsics_vec256 v07 = ws[28U]; - Lib_IntVector_Intrinsics_vec256 v17 = ws[29U]; - Lib_IntVector_Intrinsics_vec256 v27 = ws[30U]; - Lib_IntVector_Intrinsics_vec256 v37 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; Lib_IntVector_Intrinsics_vec256 - v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); Lib_IntVector_Intrinsics_vec256 - v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); Lib_IntVector_Intrinsics_vec256 - v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); Lib_IntVector_Intrinsics_vec256 v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); Lib_IntVector_Intrinsics_vec256 @@ -10616,42 +6312,42 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); Lib_IntVector_Intrinsics_vec256 v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); - Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; - Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; - Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; - Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; - ws[0U] = ws00; - ws[1U] = ws110; - ws[2U] = ws210; - ws[3U] = ws32; - ws[4U] = ws40; - ws[5U] = ws50; - ws[6U] = ws60; - ws[7U] = ws70; - ws[8U] = ws80; - ws[9U] = ws90; - ws[10U] = ws100; - ws[11U] = ws111; - ws[12U] = ws120; - ws[13U] = ws130; - ws[14U] = ws140; - ws[15U] = ws150; - ws[16U] = ws160; - ws[17U] = ws170; - ws[18U] = ws180; - ws[19U] = ws190; - 
ws[20U] = ws200; - ws[21U] = ws211; - ws[22U] = ws220; - ws[23U] = ws230; - ws[24U] = ws240; - ws[25U] = ws250; - ws[26U] = ws260; - ws[27U] = ws270; - ws[28U] = ws280; - ws[29U] = ws290; - ws[30U] = ws300; - ws[31U] = ws310; + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; for (uint32_t i = 0U; i < 25U; i++) { state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); @@ -10660,376 +6356,17 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_final( uint8_t b14[256U] = { 0U }; uint8_t b24[256U] = { 0U }; uint8_t b34[256U] = { 0U }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; - uint8_t *b35 = b.snd.snd.snd; - uint8_t *b25 = b.snd.snd.fst; - uint8_t *b15 = b.snd.fst; - uint8_t *b05 = b.fst; - b05[167U] = 0x80U; - b15[167U] = 0x80U; - b25[167U] = 0x80U; - b35[167U] = 0x80U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws33[32U] KRML_POST_ALIGN(32) = { 0U }; uint8_t *b3 = b.snd.snd.snd; uint8_t *b2 = b.snd.snd.fst; uint8_t *b1 = b.snd.fst; uint8_t *b0 = b.fst; - ws33[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); - ws33[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); - ws33[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); - ws33[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); - ws33[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); - ws33[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); - ws33[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); - ws33[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); - ws33[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); - ws33[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); - ws33[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); - ws33[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); - ws33[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); - ws33[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); - ws33[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); - ws33[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); - ws33[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); - ws33[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); - ws33[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); - ws33[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); - ws33[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); - ws33[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); - ws33[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); - ws33[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); - ws33[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); - ws33[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); - ws33[26U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); - ws33[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); - ws33[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); - ws33[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); - ws33[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); - ws33[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); - Lib_IntVector_Intrinsics_vec256 v08 = ws33[0U]; - Lib_IntVector_Intrinsics_vec256 v18 = ws33[1U]; - Lib_IntVector_Intrinsics_vec256 v28 = ws33[2U]; - Lib_IntVector_Intrinsics_vec256 v38 = ws33[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); - Lib_IntVector_Intrinsics_vec256 - v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); - Lib_IntVector_Intrinsics_vec256 - v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); - Lib_IntVector_Intrinsics_vec256 - v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); - Lib_IntVector_Intrinsics_vec256 - v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); - Lib_IntVector_Intrinsics_vec256 - v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); - Lib_IntVector_Intrinsics_vec256 - v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 - v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); - Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; - Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; - Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; - Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; - Lib_IntVector_Intrinsics_vec256 v09 = ws33[4U]; - Lib_IntVector_Intrinsics_vec256 v19 = ws33[5U]; - Lib_IntVector_Intrinsics_vec256 v29 = ws33[6U]; - Lib_IntVector_Intrinsics_vec256 v39 = ws33[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); - Lib_IntVector_Intrinsics_vec256 - v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); - Lib_IntVector_Intrinsics_vec256 - v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); - Lib_IntVector_Intrinsics_vec256 - v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); - Lib_IntVector_Intrinsics_vec256 - v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); - Lib_IntVector_Intrinsics_vec256 - v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); - Lib_IntVector_Intrinsics_vec256 - v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 - v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); - Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; - Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; - Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; - Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; - Lib_IntVector_Intrinsics_vec256 v010 = ws33[8U]; - Lib_IntVector_Intrinsics_vec256 v110 = ws33[9U]; - Lib_IntVector_Intrinsics_vec256 v210 = ws33[10U]; - Lib_IntVector_Intrinsics_vec256 v310 = ws33[11U]; - Lib_IntVector_Intrinsics_vec256 - v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); - Lib_IntVector_Intrinsics_vec256 - v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); - Lib_IntVector_Intrinsics_vec256 - v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); - Lib_IntVector_Intrinsics_vec256 - v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); - Lib_IntVector_Intrinsics_vec256 - v0__9 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); - Lib_IntVector_Intrinsics_vec256 - v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); - Lib_IntVector_Intrinsics_vec256 - v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 - v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); - Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; - Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; - Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; - Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; - Lib_IntVector_Intrinsics_vec256 v011 = ws33[12U]; - Lib_IntVector_Intrinsics_vec256 v111 = ws33[13U]; - Lib_IntVector_Intrinsics_vec256 v211 = ws33[14U]; - Lib_IntVector_Intrinsics_vec256 v311 = ws33[15U]; - Lib_IntVector_Intrinsics_vec256 - v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); - Lib_IntVector_Intrinsics_vec256 - v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); - Lib_IntVector_Intrinsics_vec256 - v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); - Lib_IntVector_Intrinsics_vec256 - v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); - Lib_IntVector_Intrinsics_vec256 - v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); - Lib_IntVector_Intrinsics_vec256 - v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); - Lib_IntVector_Intrinsics_vec256 - v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 - v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); - Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; - Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; - Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; - Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; - Lib_IntVector_Intrinsics_vec256 v012 = ws33[16U]; - Lib_IntVector_Intrinsics_vec256 v112 = ws33[17U]; - Lib_IntVector_Intrinsics_vec256 v212 = ws33[18U]; - Lib_IntVector_Intrinsics_vec256 v312 = ws33[19U]; - Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); - Lib_IntVector_Intrinsics_vec256 - v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); - Lib_IntVector_Intrinsics_vec256 - v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); - Lib_IntVector_Intrinsics_vec256 - v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 - v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); - Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; - Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; - Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; - Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; - Lib_IntVector_Intrinsics_vec256 v013 = ws33[20U]; - Lib_IntVector_Intrinsics_vec256 v113 = ws33[21U]; - Lib_IntVector_Intrinsics_vec256 v213 = ws33[22U]; - Lib_IntVector_Intrinsics_vec256 v313 = ws33[23U]; - Lib_IntVector_Intrinsics_vec256 - v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); - Lib_IntVector_Intrinsics_vec256 - v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); - 
Lib_IntVector_Intrinsics_vec256 - v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); - Lib_IntVector_Intrinsics_vec256 - v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); - Lib_IntVector_Intrinsics_vec256 - v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); - Lib_IntVector_Intrinsics_vec256 - v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); - Lib_IntVector_Intrinsics_vec256 - v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 - v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); - Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; - Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; - Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; - Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; - Lib_IntVector_Intrinsics_vec256 v014 = ws33[24U]; - Lib_IntVector_Intrinsics_vec256 v114 = ws33[25U]; - Lib_IntVector_Intrinsics_vec256 v214 = ws33[26U]; - Lib_IntVector_Intrinsics_vec256 v314 = ws33[27U]; - Lib_IntVector_Intrinsics_vec256 - v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); - Lib_IntVector_Intrinsics_vec256 - v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); - Lib_IntVector_Intrinsics_vec256 - v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); - Lib_IntVector_Intrinsics_vec256 - v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); - Lib_IntVector_Intrinsics_vec256 - v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); - Lib_IntVector_Intrinsics_vec256 - v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); - Lib_IntVector_Intrinsics_vec256 - v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 - v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); - Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; - Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; - Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; - Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; - Lib_IntVector_Intrinsics_vec256 v0 = ws33[28U]; - Lib_IntVector_Intrinsics_vec256 v1 = ws33[29U]; - Lib_IntVector_Intrinsics_vec256 v2 = ws33[30U]; - Lib_IntVector_Intrinsics_vec256 v3 = ws33[31U]; - Lib_IntVector_Intrinsics_vec256 - v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); - Lib_IntVector_Intrinsics_vec256 - v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); - Lib_IntVector_Intrinsics_vec256 - v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 - v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); - Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; - Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; - Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; - Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; - ws33[0U] = ws0; - ws33[1U] = ws1; - ws33[2U] = ws2; - ws33[3U] = ws3; - ws33[4U] = ws4; - ws33[5U] = ws5; - ws33[6U] = ws6; - ws33[7U] = ws7; - ws33[8U] = ws8; - ws33[9U] = ws9; - 
ws33[10U] = ws10; - ws33[11U] = ws11; - ws33[12U] = ws12; - ws33[13U] = ws13; - ws33[14U] = ws14; - ws33[15U] = ws15; - ws33[16U] = ws16; - ws33[17U] = ws17; - ws33[18U] = ws18; - ws33[19U] = ws19; - ws33[20U] = ws20; - ws33[21U] = ws21; - ws33[22U] = ws22; - ws33[23U] = ws23; - ws33[24U] = ws24; - ws33[25U] = ws25; - ws33[26U] = ws26; - ws33[27U] = ws27; - ws33[28U] = ws28; - ws33[29U] = ws29; - ws33[30U] = ws30; - ws33[31U] = ws31; - for (uint32_t i = 0U; i < 25U; i++) - { - state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws33[i]); - } - for (uint32_t i0 = 0U; i0 < 24U; i0++) - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; - Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; - Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; - _C[i] = - Lib_IntVector_Intrinsics_vec256_xor(uu____0, - Lib_IntVector_Intrinsics_vec256_xor(uu____1, - Lib_IntVector_Intrinsics_vec256_xor(uu____2, - Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); - KRML_MAYBE_FOR5(i1, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i1 + 4U) % 5U]; - Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i1 + 1U) % 5U]; - Lib_IntVector_Intrinsics_vec256 - _D = - Lib_IntVector_Intrinsics_vec256_xor(uu____3, - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, - 1U), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - state[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i1 + 5U * i], _D););); - Lib_IntVector_Intrinsics_vec256 x = state[1U]; - Lib_IntVector_Intrinsics_vec256 current = x; - for (uint32_t i = 0U; i < 24U; i++) - { - uint32_t _Y = Hacl_Hash_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Hash_SHA3_keccak_rotc[i]; - Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; - Lib_IntVector_Intrinsics_vec256 uu____5 = current; - state[_Y] = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), - Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); - current = temp; - } - KRML_MAYBE_FOR5(i, - 0U, - 5U, - 1U, - Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v015 = - Lib_IntVector_Intrinsics_vec256_xor(uu____6, - Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v115 = - Lib_IntVector_Intrinsics_vec256_xor(uu____8, - Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v215 = - Lib_IntVector_Intrinsics_vec256_xor(uu____10, - Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v315 = - Lib_IntVector_Intrinsics_vec256_xor(uu____12, - Lib_IntVector_Intrinsics_vec256_and(uu____13, 
state[0U + 5U * i])); - Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; - Lib_IntVector_Intrinsics_vec256 - uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); - Lib_IntVector_Intrinsics_vec256 - v4 = - Lib_IntVector_Intrinsics_vec256_xor(uu____14, - Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); - state[0U + 5U * i] = v015; - state[1U + 5U * i] = v115; - state[2U + 5U * i] = v215; - state[3U + 5U * i] = v315; - state[4U + 5U * i] = v4;); - uint64_t c = Hacl_Hash_SHA3_keccak_rndc[i0]; - Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; - state[0U] = - Lib_IntVector_Intrinsics_vec256_xor(uu____16, - Lib_IntVector_Intrinsics_vec256_load64(c)); - } + b0[167U] = 0x80U; + b1[167U] = 0x80U; + b2[167U] = 0x80U; + b3[167U] = 0x80U; + Hacl_Hash_SHA3_Simd256_absorb_inner_256(168U, b, state); } /** diff --git a/src/msvc/Hacl_SHA2_Vec128.c b/src/msvc/Hacl_SHA2_Vec128.c index 18f9a73a..02af75b1 100644 --- a/src/msvc/Hacl_SHA2_Vec128.c +++ b/src/msvc/Hacl_SHA2_Vec128.c @@ -42,10 +42,7 @@ static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha224_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec128 *hash -) +sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -298,7 +295,7 @@ sha224_update4( static inline void sha224_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -313,7 +310,7 @@ sha224_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update4(mb, st); } @@ -323,7 +320,7 @@ static inline void sha224_update_last4( uint64_t totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -377,13 +374,13 @@ sha224_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha224_update4(last0, hash); if (blocks > 1U) { @@ -393,10 +390,7 @@ sha224_update_last4( } static inline void -sha224_finish4( - Lib_IntVector_Intrinsics_vec128 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -491,9 +485,9 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = 
{ .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; sha224_init4(st); @@ -509,7 +503,7 @@ Hacl_SHA2_Vec128_sha224_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha224_update_last4(len_, rem, lb, st); sha224_finish4(st, rb); @@ -528,10 +522,7 @@ static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash) } static inline void -sha256_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec128 *hash -) +sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash) { KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; @@ -784,7 +775,7 @@ sha256_update4( static inline void sha256_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *st ) { @@ -799,7 +790,7 @@ sha256_update_nblocks4( uint8_t *bl1 = b1 + i * 64U; uint8_t *bl2 = b2 + i * 64U; uint8_t *bl3 = b3 + i * 64U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update4(mb, st); } @@ -809,7 +800,7 @@ static inline void sha256_update_last4( uint64_t totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash ) { @@ -863,13 +854,13 @@ sha256_update_last4( uint8_t *last11 = last3 + 64U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha256_update4(last0, hash); if (blocks > 1U) { @@ -879,10 +870,7 @@ sha256_update_last4( } static inline void -sha256_finish4( - Lib_IntVector_Intrinsics_vec128 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[128U] = { 0U }; Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; @@ -977,9 +965,9 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; 
sha256_init4(st); @@ -995,7 +983,7 @@ Hacl_SHA2_Vec128_sha256_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha256_update_last4(len_, rem, lb, st); sha256_finish4(st, rb); diff --git a/src/msvc/Hacl_SHA2_Vec256.c b/src/msvc/Hacl_SHA2_Vec256.c index 4098d4c7..c34767f5 100644 --- a/src/msvc/Hacl_SHA2_Vec256.c +++ b/src/msvc/Hacl_SHA2_Vec256.c @@ -1541,10 +1541,7 @@ static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha384_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec256 *hash -) +sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -1781,7 +1778,7 @@ sha384_update4( static inline void sha384_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -1796,7 +1793,7 @@ sha384_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update4(mb, st); } @@ -1806,7 +1803,7 @@ static inline void sha384_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -1860,13 +1857,13 @@ sha384_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha384_update4(last0, hash); if (blocks > 1U) { @@ -1876,10 +1873,7 @@ sha384_update_last4( } static inline void -sha384_finish4( - Lib_IntVector_Intrinsics_vec256 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -1966,9 +1960,9 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha384_init4(st); @@ -1984,7 +1978,7 @@ Hacl_SHA2_Vec256_sha384_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t 
*bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha384_update_last4(len_, rem, lb, st); sha384_finish4(st, rb); @@ -2003,10 +1997,7 @@ static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash) } static inline void -sha512_update4( - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, - Lib_IntVector_Intrinsics_vec256 *hash -) +sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash) { KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; @@ -2243,7 +2234,7 @@ sha512_update4( static inline void sha512_update_nblocks4( uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *st ) { @@ -2258,7 +2249,7 @@ sha512_update_nblocks4( uint8_t *bl1 = b1 + i * 128U; uint8_t *bl2 = b2 + i * 128U; uint8_t *bl3 = b3 + i * 128U; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update4(mb, st); } @@ -2268,7 +2259,7 @@ static inline void sha512_update_last4( FStar_UInt128_uint128 totlen, uint32_t len, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ b, + Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash ) { @@ -2322,13 +2313,13 @@ sha512_update_last4( uint8_t *last11 = last3 + 128U; uint8_t *l30 = last01; uint8_t *l31 = last11; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } }; Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last0 = scrut.fst; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ last1 = scrut.snd; + Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst; + Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd; sha512_update4(last0, hash); if (blocks > 1U) { @@ -2338,10 +2329,7 @@ sha512_update_last4( } static inline void -sha512_finish4( - Lib_IntVector_Intrinsics_vec256 *st, - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ h -) +sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h) { uint8_t hbuf[256U] = { 0U }; Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; @@ -2428,9 +2416,9 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *input3 ) { - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } }; KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; sha512_init4(st); @@ -2446,7 +2434,7 @@ Hacl_SHA2_Vec256_sha512_4( uint8_t *bl1 = b1 + input_len - rem1; uint8_t *bl2 = b2 + input_len - rem1; uint8_t *bl3 = b3 + input_len - rem1; - K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + Hacl_Hash_SHA2_uint8_4p lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } }; sha512_update_last4(len_, rem, lb, st); 
sha512_finish4(st, rb); diff --git a/src/wasm/EverCrypt_Hash.wasm b/src/wasm/EverCrypt_Hash.wasm index a5f05d9df8aef198166f523bcfafc86a034516b0..1447feb38afaaca8ed624e0dc44b1238d670b653 100644 GIT binary patch delta 10431 zcmbta33yf2wLW{38!`dWf{y@xt}XKvLK0>PxghhbVig2L<|G7U5RlZ#6S4XhczA9AFs;^Hf*njQ4&pqc}`aS)=$9K7R|7)*lueHzm za|3T(l^?%SqmDCf)->Tbj)*QRD|7BWR!0c8zNouyXlr+?uJmi z(6~qDIW66%GycsjO|6^JDz*0`6KD3CIe*6N7H(CjNiw!`oz{B)d}=+>#{F|@E78`? zOlu>u-R@~EL_54UsnMZ*u6r@9StvI*-)ZmuF|A1`w?juK7poQ`Ph)5}`R>n?8n}bg zTe_d8-|VhTzg>3BatlIP?knjnMJM-4dY$ zC6mWapEP#XY?cjW1(#~ql*4Z0-I|kTPMqir*X!gd<7Q29?r}?M|2;G!?P}%H=#)BR z(nMGH)vPX}Yg9BiT5II$kI;Zta&ciqE*257xTgLL>2;I;hpcnNt)83Zu00rX&*j|` z{k-|*P?NBu+SMqec44P~QeoI>;W*(ksd6YMvt|Pix)0|x%5P8Y!%kPnp`5TNRynZA z2|EL=4Yg-+NfEVg>jaeCmvZXXYp-&{r1B^)=8?j}y=hK^2z{G%x-jZ3T*RR zI!-49$m0NcY+IPGI#9OiNF7wCaF*(fpq&FjI{}H}Fxz&FEv*yYQJvkLE$Zc|F0kto zChvfh3zSlVe09nlG%B)oUBaSBb>ZL*h3ZPVsvC7hb)MA9tgr3>a9`|FzagKu8+GRt zu(StfsJe69+f@PL7KH12al;|igL=fO5HeM0T}Xwf0?yroO?$wmN7yecBR*gu=jYbR zxiwO#!f0Ag==BVTJl_-sV!dp9Iz_yMfkvS?lop5T#G$$#D#strs3#W#{#23*Q4)nZ zI!=arv_;K`>V@#V;==cWZa6f!kXGMU5iWhg+1yLEWmZ^)e~DrIVuinj0%jWh4ElANA#P^i%y2 zZ2+PT2(;tMwUgg0%50_v(m=?QN{Vp#)u6BuH&=scurD5hYzBE}8%q6=jvD64Xc!H_ z$%ci`I5eafr{kFYDSaq^4nT@(0AGlR8jcKx2hSUJaNgnOyl~kwLfK&vCn|?T#z~fz ztGjMc)0XjiY*`~{8F65G_YMBEX`G_Nrb$6lw!A0NpGh}fk1gZR1j`Y|T_o|O@rrC3 ze|DJO8}ETgh%+MUk&-O^P ze`Mg<9tFfhWR#s9Mv1-G8>Jq=&>T&pV;;#|>ijV@#*B;6>~b_*jt;mS1H@xs407hv z86|dEI7W@dEPas1s&Sa555e%EfZ;eGJ4+voEv*yYQ4g6}Iv#f81GDr2Ab*yQ)ySKr zVN^#1q$4ni{K^?LiS0nI24K4^v??H4w50hsN;S3<#gXyuQb;3L9Vbg<| zu$vj^K{1fugHnyW9?aw(%;ezUvsqNEX45PbZKcm=a{%1lliJjzM>xhK9OHI17cu4r zhGj_2qj|AHSnkT7PxDP7=CSEK*vt$2+%+GF3o+lG!6I*;B(@lC zBk&Rlc$v&TDPl=14xtXTSPznjCb0+m5UgoEf<8BzqLZ3(%47Ik(pX4Xo=uOg)gTH(VSK=Z&wJW zw-v(UwXhIQ`7afGZz5_HC$bIh6&^Ah2!u0PPx^qY93r0J0THJE zY9x}VsjU-K*_wy_s)!#2=8v;^f~q#xy1pu#GcUvke}b(OG_`etDqDLejMyV0_!>4( zP}Sz1u2p4oroH&!YuP%eNvn0S<8|qAEA$T8ot@GXRvJv6)>vUMC0eV6Xcs!TiaS_0 z+oQoJ1>rT3|eNyWMbD=B96|r$)CUHTfph{d` z{u1FLWHGdGS+Mj+22^+pho}^G@O!EpDyvec4FXenqo9qxe>;~8HVN7!%s|?x18n2~ zSo#5=0?bv~#CM&aDNmIH4BjNvQ#h*HEa)krwg`;rt?XoL(8(6T*p6yw#(?#xG3@w} z=%%)@{WfGBdn7gp#{S4gM2)?n4SJ~2 zTCa#;76Z1J8p95mHFXOQCAn?0>$R$a2ep%Z?ZnEw=ahI<0 zE;hx|uQF743sv6jwb4`6ZQL!?GdPjjBWREB-)rMuL3?c*_viq7H~^M@0H^?S8~1u` z^i(;(kiA0f6I3PCenI<$I>3EA$X*Txy&MpXZ6Bc-1GbME!_M^a5ZfO@+Fpg|8Nt}^ zBSLEI^$`GlJf!>hfKWdZuuz8uVS!42;_y#Gx}8TDsiPd=XfVMeg0XEU3}e8yQ)Ae1 zg3VMl+gArkA*7B8I_6i{@^-;-LC0-{kLd~@V^b{s3PS~$D}3Cmu&2rujvN;%3Vn5g zEl&goepE1K%PPI{ncQHX%MJF~++d&24ZeZV;awyZAu=C3qMqdpo=wOAIF8Cs3PN6x z#LjKQGh^oWhHo!i&L9di4HtL3m-y()%xbdjEGpc==mr z_yHg&pcS~#>Lo!ha4Xd>0~^#5=nNO(mzvS(5V z*(X%+9A*kvEFdgr8531N2A=#CCzGga93&{SgGiW3E#AW`+Cze}@sOabQ(yv&ckpZN zAVJqSNKh`<2V-82_wcIrkf3ZlBq-|?5-Ni#oYfA3y7Vhu{Dj&Jr&m02@WAwIPZ~T0 zz3NGWnbBD-MGMi@m$|ET^E}f34?%cggxD+d8mEO9ZVkO=t~WMeS8c90rWn9Z?WBH= z`i;0wdLzwK)ui#JP`~BVs{a)9TcLi(r15+9^828d z-wDQ+G@uy+C=&lTp)u?XX}rbuZv{!?4Z+wa4TRL#BMku5^etWB5pM|fUrhcv^c&3r znl1hyfYjUU`R!n4kONz`fLRP!w$K=M$gHV5COfOuJFr&ovY&Uc@@S2T|2;wP`8BpZ zd|%M}(BmGyr%U`En_}sg7%IGl62I^D&{Netd|#*!xQ7=6UGV*TJ-jIBqV3@Y9pC~7 zz|s!@6=3e+MX!gRs_x-Mp*|G!j!=IT^r286aS#8*Uj7vH@{wR{dkD=KuszfmcBY3P zv;D`x9)2Jg`#nTRjlCWMpobsp9)kG)Ea(HF{#y_hsPtK%`2WI4eZm1g2`2a#!PvGE zhB07UsWI$0!Di}Hw*NFp7a?^?&?UdZ9`RomblFz;lCJP2HpSAfFjRoK!k4`Yd#YUF z$Yr7a3Vrn%TYeTKg1-vJZ268}`NVIqPy7b^#BZ=q{084Z{P51i|Cv5^M19T~e4da2 za2)Yp5rn)TO$vOorz_9`=i$Q;{}+6sFWKLhL4U}D{X=MpKOpdl{|hr2VTq=HX(ppi z0~vD3Zh8lWOi_PBj3DuUB?zy->1q6xP+xNj-*5`w1Uu;w|NjWW3tN`>|Lzn2{|ds( z-};sx0D=NqfeWp!3i_H`ss0hzpq4=2auNQaiT~Re@qaDos!j?XaiDK(wZG%kzKgGp 
zCjRf@h#xbBD;5xzBO6Wp0U0Ro8Yh#e3(mYznH?D7kN0q0dte?6$c+c4!GKVwz$6%u zA%*X?g9KgUAVC?#kGztCq@tmFV#`T?K<%#*1?+7a)mYSyTbY89VYt(LS(s>dZ*;|cci zM9|CQlCfnCXvTnbsWI#fYph}WHAvgzRazk#`>cVG8hfk(fTFIEoYK83q*^P%{AV0m zCS$)fe^LUeb?kawFg3`5EnmPc25dPsh8b}}FD{Vd3)0N~(_qO8=-SgS$1(lL2 z;gjjzWnb7v7%crdLWQ?b$4akJo+>9@QYqC2NgJfv$nW7ysIs=&Bx$`=Pw_h*76>uK z)Mj{ATR6rRNMlukDP!-uF~Le>{^z-l$PEBh+G4A`mF>3%?GcM{%zm@Qo;@~Uzg1Ux zFjCznX`56}qtop0xCC;8blc{%t6e1gm-sUz^Lj&h8n!QQ|W*na|GrLp$}0H8NVZEvgDzB*`+Sirvh5sN*0 zY{I@;cVY-qJtpaxRL8luQC*P}?D9nL*ip&YHW!*PV4JJ4I$@f7P`U?mbLu=R8%s3z zjH6CUI%z6=O42D?;gjqSTL^`vUty^57AkzotFWi4J9|p1(~?e0^&G$Z6@FgQvr@ev zqto(^WYlY|^zTuTp4#C34A3T;Q+QU~+>-y1hBxNh-}$$SruD2$6sA9FTSg77iH}PU4Pew^QdyEHa2L;M zhZm_^=@z<`8d9yyI~rgqGP}E9EL`F~T3YH3o6-TF)7%~78b{}sRtf7vZx!czTkibQ z>F&{m&3@43g;~bsg;~bs2ea5^bn%?8emI@#1=HN2Q#!iU3%-w5&u{OgqQ%7>qSXtU z{+~XgUo0F^!=}G?acT6!vL8``v}GpWv}GpW)Mc>}=q;4MZn>$;eq0GkD~!w13gdEG zg)Twu)jy&Hh3iZS3fFxfEnFKbffl0#h3kHx1ZC@ECFodL8lAE6N0ea8W|Qxh%_iT? zn`0%=TPT6uayM`O;S#*Idx?8}yK#AayLRbb+g{}?Z0z9UtlLk0WWXqC73=i}2; zneL_^3|uh${~_u?dFIY$^PP>pw!1*y+*;M4I{c_)+DD~Lw87rSa2qYyAFS}dupe}= zM~w+XGSbt@fxp2SX=X7XBh@VWXQY@#zl@q@(KjR6ECywS%wk|hl3Dc0s9_epGo)GE znIX)gSMv5MIsFC! z64sPtzEHGOsg%lo(!5dOUXM12bYL?g&rL_|CW!g8g7-7?`MhSC$0s(+Tt2f|w&zou lWe%U)EZgzP&GIfjyIJ1Nrx%pTj~Wfdx8cP(C|>UPMw8f{`lreeq5 zfziEB?jKH$?%h8oZDz-;<*1xpu$G|;_`kvEt9DD3?1*iH(e3t1E{<5-W?-`IF{7u> zD^-K1Om?qhi-vXVbDN4h>{puydD$aMi2Y()$;OmNQ4O0`E)>_+)%ybtjZMu#+7jBb zbz5sV5^Zbm=#0g?ws*6neOzuNEaoU;_w4y{6Xwu(AUT>GV?Wv-HXnR`_rO?E3bIaz zDtG9Kmfg?pNsf+5osaNb(=!K>Nh!uYbr7_j{ouH3?6xj`c~twpV3!ruu|Q!xs?!b@ zeqgu$F%6>0@{zt2Ykp&$HeF^i*>EL2f#`aSoEuriE>&@17d?(B)*U3MDpk0O{w&cd z-ElRd+N*Ijy)4nsB;AwhR`cBy6A$*V*&9V{qQYkKVlS2aY^Q6z1??Ne-UiP^Bcz#=ARl|8$7KtaAVUgBD5e=Dye+r|lz?o?vnkNi9QqdShNW84lp={! zu5BS993oqA=)PK8agc0d?u6gm3gVM64zm{%rQQgRh!$bqA`C6UIV~b!@fMME3z5WI z?2M2oj*>PUg@Mx-(vDk62W}M?VulOpgbRrQWh_V8Hn7ZDWjl0hP?e-wSTn|zjp-K> zhq8E1Svy$w%Q|%{Qx@lCaXytJO1f|t*$x-d4O_BX$IA2Z4!+qA9yUk8Nexaj`~7KE z=_!#xanK@x6S@|?xL0VA;1vm&Ad#a*FIZfQ-n14XNte5q^x;0zkNfUxu@m=@$F;xv zW|h?{nL>C9K^ZxP=-ZTXk{rmgb(q+;k3U@o7ppELA6@t$Y~_k7P66j zCED!+OIB2`{Wc!RgF}5emJi3_<56y1dUe8p8raC0a`w(^!`kABepFOnh^^TE{x~Vd zYj80x!gi;>5Fl~}SnBLiW|}0^$?7m4#D~0vHPzaWs{c#<>V!u671vf;0tg8 zfAG%igD;&gf%|0(3*~ynp0B-dW-5l+>sNDLpAT1JANGg`UCAG`)_E`22A38TJBED4-x9fKzv}%mhZ3$zNnX F0ss%|PniG! 
diff --git a/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm b/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm
index a647522149a9d0384bb143e836cc6786352c2a7c..e1bedee65cf812d0d0d0ba6dc8cec5bc20ba0fb6 100644
Binary files a/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm and b/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm differ
diff --git a/src/wasm/Hacl_Bignum25519_51.wasm b/src/wasm/Hacl_Bignum25519_51.wasm
index 9f05386ac9d1ac58bd8fa7cb185171be8a0fe2f6..78a0b2968d38b3d3d1a4661a240cb503832e447e 100644
Binary files a/src/wasm/Hacl_Bignum25519_51.wasm and b/src/wasm/Hacl_Bignum25519_51.wasm differ
diff --git a/src/wasm/Hacl_Bignum256_32.wasm b/src/wasm/Hacl_Bignum256_32.wasm
index 05db6caa2c6c0790aead3177e03e045f0525f046..5fcc70ae85583e1b2246b79018731dba44111943 100644
Binary files a/src/wasm/Hacl_Bignum256_32.wasm and b/src/wasm/Hacl_Bignum256_32.wasm differ
diff --git a/src/wasm/Hacl_HMAC_Blake2b_256.wasm b/src/wasm/Hacl_HMAC_Blake2b_256.wasm
index bbc821ef32b6d65835c88ca021b01effd2860dd8..9ee78af8198d6c3580fe5bb2bbc547ce9ddfe612 100644
Binary files a/src/wasm/Hacl_HMAC_Blake2b_256.wasm and b/src/wasm/Hacl_HMAC_Blake2b_256.wasm differ
diff --git a/src/wasm/Hacl_HMAC_Blake2s_128.wasm b/src/wasm/Hacl_HMAC_Blake2s_128.wasm
index dde8629ec17f3175fd1850f94720376d4aa216a2..22fce826a4f1426f89cde12cf1b464f42c1a80a2 100644
Binary files a/src/wasm/Hacl_HMAC_Blake2s_128.wasm and b/src/wasm/Hacl_HMAC_Blake2s_128.wasm differ
diff --git a/src/wasm/Hacl_Hash_Blake2s.wasm b/src/wasm/Hacl_Hash_Blake2s.wasm
index f9d6889edb710d8b1845bb009261b1fd960d68cb..8e69e8f79009ec683e00f949be0a3edf8511e7bd 100644
Binary files a/src/wasm/Hacl_Hash_Blake2s.wasm and b/src/wasm/Hacl_Hash_Blake2s.wasm differ
diff --git a/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm b/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm
index fd3c1b86281cdfbf43173dd093717f4f2d448d64..b1a26f75e6b90a9bd26ab9ade9ca6c22708f4a85 100644
Binary files a/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm and b/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm differ
diff --git a/src/wasm/Hacl_Hash_SHA3.wasm b/src/wasm/Hacl_Hash_SHA3.wasm
index befbda8d6f38322d8ac33a016a4e513db41f558c..8104d0a63bc6866200188517709febed49ef43e3 100644
Binary files a/src/wasm/Hacl_Hash_SHA3.wasm and b/src/wasm/Hacl_Hash_SHA3.wasm differ
diff --git a/src/wasm/Hacl_Impl_Blake2_Constants.wasm b/src/wasm/Hacl_Impl_Blake2_Constants.wasm
index 8bf185786ff4f80282f9b5fab198c566c1389795..1346057cfddf04eeabb7caf276367966e7b83db4 100644
Binary files a/src/wasm/Hacl_Impl_Blake2_Constants.wasm and b/src/wasm/Hacl_Impl_Blake2_Constants.wasm differ
diff --git a/src/wasm/Hacl_K256_ECDSA.wasm b/src/wasm/Hacl_K256_ECDSA.wasm
index c0e66ff8b7bcf3c7388a31c31c4dae96b1574d3f..5022a27e5630c9183d0da04577ce3008c7d63a43 100644
Binary files a/src/wasm/Hacl_K256_ECDSA.wasm and b/src/wasm/Hacl_K256_ECDSA.wasm differ
diff --git a/src/wasm/Hacl_MAC_Poly1305.wasm b/src/wasm/Hacl_MAC_Poly1305.wasm
index e72930c86ee20895edb23cdd78e033307206b896..c4e38920109f62e96072a3c0e6aadc2dee6f1cda 100644
Binary files a/src/wasm/Hacl_MAC_Poly1305.wasm and b/src/wasm/Hacl_MAC_Poly1305.wasm differ
diff --git a/src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm b/src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm
index bdfde53755a00c6d8c43b1ab7f7cea670bdde001..23ece19731facda0bb035ecd71b20dee35b54443 100644
Binary files a/src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm and b/src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm differ
diff --git a/src/wasm/shell.js b/src/wasm/shell.js
index cc877fd1..28a02c3e 100644
--- a/src/wasm/shell.js
+++ b/src/wasm/shell.js
@@ -1,7 +1,7 @@
 // To be loaded by main.js
 var my_js_files = ["./test.js"];
-var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2b", "Hacl_Hash_Blake2s", "Hacl_Hash_Blake2b_Simd256", "Hacl_Hash_Blake2s_Simd128", "Hacl_Hash_Base", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "Hacl_HMAC", "Hacl_HMAC_Blake2s_128", "Hacl_HMAC_Blake2b_256", "Hacl_Hash_SHA3", "Hacl_Hash_MD5", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"];
"EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"]; +var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2b", "Hacl_Hash_Blake2s", "Hacl_Hash_Blake2b_Simd256", "Hacl_Hash_Blake2s_Simd128", "Hacl_Hash_Base", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "Hacl_HMAC", "Hacl_HMAC_Blake2s_128", "Hacl_HMAC_Blake2b_256", "Hacl_Hash_SHA3", "Hacl_Hash_SHA3_Simd256", "Hacl_Hash_MD5", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"]; var my_debug = false; if (typeof module !== "undefined") From fbcb7077b681abbd571c24f09d90cd3340739fcb Mon Sep 17 00:00:00 2001 From: Franziskus Kiefer Date: Fri, 10 May 2024 18:28:50 +0200 Subject: [PATCH 06/10] update docs; remove keccak from ocaml --- docs/reference/hacl/hash/sha3.md | 4 +- ocaml/hacl-star/Hacl.ml | 10 ----- ocaml/hacl-star/Hacl.mli | 12 ------ ocaml/hacl-star/tests/hash_test.ml | 66 ------------------------------ 4 files changed, 2 insertions(+), 90 deletions(-) diff --git a/docs/reference/hacl/hash/sha3.md b/docs/reference/hacl/hash/sha3.md index 1bbe5e3e..6993ceb1 100644 --- a/docs/reference/hacl/hash/sha3.md +++ b/docs/reference/hacl/hash/sha3.md @@ -114,12 +114,12 @@ SHAKE128 and SHAKE256 have a 128- or 256-bit security strength and can produce a `````{tabs} ````{tab} 128-bit security strength -```{doxygenfunction} Hacl_Hash_SHA3_shake128_hacl +```{doxygenfunction} Hacl_Hash_SHA3_shake128 ``` ```` ````{tab} 256-bit security strength -```{doxygenfunction} Hacl_Hash_SHA3_shake256_hacl +```{doxygenfunction} Hacl_Hash_SHA3_shake256 ``` ```` ````` diff --git a/ocaml/hacl-star/Hacl.ml b/ocaml/hacl-star/Hacl.ml index e6bfbd45..e537a4e7 100644 --- 
diff --git a/ocaml/hacl-star/Hacl.ml b/ocaml/hacl-star/Hacl.ml
index e6bfbd45..e537a4e7 100644
--- a/ocaml/hacl-star/Hacl.ml
+++ b/ocaml/hacl-star/Hacl.ml
@@ -134,12 +134,6 @@ module Keccak = struct
       (* Hacl.SHA3.shake256_hacl *)
       assert (C.disjoint msg digest);
       Hacl_Hash_SHA3.hacl_Hash_SHA3_shake256 (C.ctypes_buf digest) (C.size_uint32 digest) (C.ctypes_buf msg) (C.size_uint32 msg)
-    let keccak ~rate ~capacity ~suffix ~msg ~digest =
-      (* Hacl.Impl.SHA3.keccak *)
-      assert (rate mod 8 = 0 && rate / 8 > 0 && rate <= 1600);
-      assert (capacity + rate = 1600);
-      assert (C.disjoint msg digest);
-      Hacl_Hash_SHA3.hacl_Hash_SHA3_keccak (UInt32.of_int rate) (UInt32.of_int capacity) (C.size_uint32 msg) (C.ctypes_buf msg) (UInt8.of_int suffix) (C.size_uint32 digest) (C.ctypes_buf digest)
   end
   let shake128 ~msg ~size =
     let digest = C.make size in
@@ -149,10 +143,6 @@ module Keccak = struct
     let digest = C.make size in
     Noalloc.shake256 ~msg ~digest;
     digest
-  let keccak ~rate ~capacity ~suffix ~msg ~size =
-    let digest = C.make size in
-    Noalloc.keccak ~rate ~capacity ~suffix ~msg ~digest;
-    digest
 end
 
 module SHA1 : HashFunction =
diff --git a/ocaml/hacl-star/Hacl.mli b/ocaml/hacl-star/Hacl.mli
index 72c0db59..d00723b8 100644
--- a/ocaml/hacl-star/Hacl.mli
+++ b/ocaml/hacl-star/Hacl.mli
@@ -408,12 +408,6 @@ module Keccak : sig
   val shake256 : msg:bytes -> size:int -> bytes
   (** [shake256 msg size] hashes [msg] using SHAKE-256 and returns a digest of [size] bytes. *)
 
-  val keccak : rate:int -> capacity:int -> suffix:int -> msg:bytes -> size:int -> bytes
-  (** Direct access to the general Keccak function, of which all the SHA-3 and SHAKE functions
-      are {{:https://en.wikipedia.org/wiki/SHA-3#Instances}instances}. While the library
-      does run some sanity checks for the parameters, users should be extremely careful
-      if using the Keccak function directly. *)
-
   (** Versions of these functions which write their output in a buffer passed in
       as an argument *)
   module Noalloc : sig
@@ -422,12 +416,6 @@ module Keccak : sig
     val shake256 : msg:bytes -> digest:bytes -> unit
     (** [shake256 msg digest] hashes [msg] using SHAKE-256 and outputs the result in [digest]. *)
-
-    val keccak : rate:int -> capacity:int -> suffix:int -> msg:bytes -> digest:bytes -> unit
-    (** Direct access to the general Keccak function, of which all the SHA-3 and SHAKE functions
-        are {{:https://en.wikipedia.org/wiki/SHA-3#Instances}instances}. While the library
-        does run some sanity checks for the parameters, users should be extremely careful
-        if using the Keccak function directly. *)
   end
 end
 
 (** SHAKE-128, SHAKE-256, and the general Keccak function
diff --git a/ocaml/hacl-star/tests/hash_test.ml b/ocaml/hacl-star/tests/hash_test.ml
index f27d779c..30f5d2ec 100644
--- a/ocaml/hacl-star/tests/hash_test.ml
+++ b/ocaml/hacl-star/tests/hash_test.ml
@@ -247,69 +247,6 @@ module MakeBlake2Tests (M: Blake2) = struct
     List.iter (fun v -> test_nonagile v name reqs) tests
 end
 
-
-let test_keccak () =
-  let v = test_sha3_256 in
-  let test_result = test_result "Keccak/SHAKE" in
-  let sha3_256 = Hacl.Keccak.keccak ~rate:1088 ~capacity:512 ~suffix:6 in
-  let digest = sha3_256 ~msg:v.msg ~size:32 in
-
-  let output_shake128 = Hacl.Keccak.shake128 ~msg:v.msg ~size:16 in
-
-  let keccak_shake_128 = Hacl.Keccak.keccak ~rate:1344 ~capacity:256 ~suffix:31 in
-  let output_keccak_shake_128 = keccak_shake_128 ~msg:v.msg ~size:16 in
-
-  let output_shake256 = Hacl.Keccak.shake256 ~msg:v.msg ~size:32 in
-
-  let keccak_shake_256 = Hacl.Keccak.keccak ~rate:1088 ~capacity:512 ~suffix:31 in
-  let output_keccak_shake_256 = keccak_shake_256 ~msg:v.msg ~size:32 in
-
-  let keccak_256 = Hacl.Keccak.keccak ~rate:1088 ~capacity:512 ~suffix:1 in
-  let output_keccak_256 = keccak_256 ~msg:keccak256_test.msg ~size:32 in
-
-  if Bytes.equal digest v.expected &&
-     Bytes.equal output_shake128 output_keccak_shake_128 &&
-     Bytes.equal output_shake256 output_keccak_shake_256 &&
-     Bytes.equal output_keccak_256 keccak256_test.expected then
-    test_result Success ""
-  else
-    test_result Failure ""
-
-
-let test_keccak_noalloc () =
-  let v = test_sha3_256 in
-  let test_result = test_result "Keccak/SHAKE (noalloc)" in
-  let sha3_256 = Hacl.Keccak.Noalloc.keccak ~rate:1088 ~capacity:512 ~suffix:6 in
-  let digest = Test_utils.init_bytes 32 in
-  sha3_256 ~msg:v.msg ~digest;
-
-  let output_shake128 = Test_utils.init_bytes 16 in
-  Hacl.Keccak.Noalloc.shake128 ~msg:v.msg ~digest:output_shake128;
-
-  let keccak_shake_128 = Hacl.Keccak.Noalloc.keccak ~rate:1344 ~capacity:256 ~suffix:31 in
-  let output_keccak_shake_128 = Test_utils.init_bytes 16 in
-  keccak_shake_128 ~msg:v.msg ~digest:output_keccak_shake_128;
-
-  let output_shake256 = Test_utils.init_bytes 32 in
-  Hacl.Keccak.Noalloc.shake256 ~msg:v.msg ~digest:output_shake256;
-
-  let keccak_shake_256 = Hacl.Keccak.Noalloc.keccak ~rate:1088 ~capacity:512 ~suffix:31 in
-  let output_keccak_shake_256 = Test_utils.init_bytes 32 in
-  keccak_shake_256 ~msg:v.msg ~digest:output_keccak_shake_256;
-
-  let keccak_256 = Hacl.Keccak.Noalloc.keccak ~rate:1088 ~capacity:512 ~suffix:1 in
-  let output_keccak_256 = Test_utils.init_bytes 32 in
-  keccak_256 ~msg:keccak256_test.msg ~digest:output_keccak_256;
-
-  if Bytes.equal digest v.expected &&
-     Bytes.equal output_shake128 output_keccak_shake_128 &&
-     Bytes.equal output_shake256 output_keccak_shake_256 &&
-     Bytes.equal output_keccak_256 keccak256_test.expected then
-    test_result Success ""
-  else
-    test_result Failure ""
-
-
 let _ =
   test_agile test_sha2_224;
   test_agile test_sha2_256;
@@ -349,6 +286,3 @@ let _ =
   let module Tests = MakeBlake2Tests (Hacl.Blake2s_Simd128) in
   Tests.run_tests "BLAKE2s_128" blake2s_keyed_tests [VEC128];
-
-  test_keccak ();
-  test_keccak_noalloc ()
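Note for reviewers: the tests deleted above were the only place that spelled out which (rate, capacity, suffix) triples the generic keccak entry point was instantiated with. For reference, here they are as C constants; the values are copied from the removed tests (and satisfy the removed assertion rate + capacity = 1600), while the macro names themselves are illustrative:

/* Keccak instances exercised by the removed OCaml tests.
 * In every case rate + capacity = 1600; the suffix is the
 * domain-separation byte absorbed before padding. */
#define SHA3_256_RATE   1088u  /* capacity 512, suffix 0x06 */
#define SHAKE128_RATE   1344u  /* capacity 256, suffix 0x1F (31) */
#define SHAKE256_RATE   1088u  /* capacity 512, suffix 0x1F (31) */
#define KECCAK256_RATE  1088u  /* capacity 512, suffix 0x01 (pre-NIST Keccak) */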
From 1f94b2f3fbb28c0497e8c414b7b5b1bde67a3144 Mon Sep 17 00:00:00 2001
From: Franziskus Kiefer
Date: Fri, 10 May 2024 18:36:33 +0200
Subject: [PATCH 07/10] remove js keccak

---
 js/api.json | 68 -----------------------------------------------------
 1 file changed, 68 deletions(-)

diff --git a/js/api.json b/js/api.json
index f6fa0e60..aed89076 100644
--- a/js/api.json
+++ b/js/api.json
@@ -789,74 +789,6 @@
       "return": {
         "type": "void"
       }
-    },
-    "keccak": {
-      "module": "Hacl_Hash_SHA3",
-      "name": "keccak",
-      "args": [{
-          "name": "rate",
-          "kind": "input",
-          "type": "uint32",
-          "interface_index": 0,
-          "tests": [
-            1088
-          ]
-        },
-        {
-          "name": "capacity",
-          "kind": "input",
-          "type": "uint32",
-          "interface_index": 1,
-          "tests": [
-            512
-          ]
-        },
-        {
-          "name": "input_len",
-          "kind": "input",
-          "type": "uint32"
-        },
-        {
-          "name": "input",
-          "kind": "input",
-          "type": "buffer",
-          "size": "input_len",
-          "interface_index": 2,
-          "tests": [
-            "4c6f6e6774656d70732c206a65206d65207375697320636f7563686520646520626f6e6e65206865757265"
-          ]
-        },
-        {
-          "name": "suffix",
-          "kind": "input",
-          "type": "uint32",
-          "interface_index": 3,
-          "tests": [
-            1
-          ]
-        },
-        {
-          "name": "output_len",
-          "kind": "input",
-          "type": "uint32",
-          "interface_index": 4,
-          "tests": [
-            32
-          ]
-        },
-        {
-          "name": "digest",
-          "kind": "output",
-          "type": "buffer",
-          "size": "output_len",
-          "tests": [
-            "9f3afe7d35d9bbc4efd98252357e73e85ce1234a48603a063bb7079174aafa68"
-          ]
-        }
-      ],
-      "return": {
-        "type": "void"
-      }
-    }
   },
   "HMAC": {

From 45a199642d3779687e83c74feb00321020f3843c Mon Sep 17 00:00:00 2001
From: Aymeric Fromherz
Date: Wed, 12 Jun 2024 16:29:01 +0200
Subject: [PATCH 08/10] Trigger CI

From 0932a808a5db367f6dc0fd212697cd515a658eba Mon Sep 17 00:00:00 2001
From: Aymeric Fromherz
Date: Fri, 14 Jun 2024 15:59:23 +0200
Subject: [PATCH 09/10] Fix sphinx version to work around build issues

---
 docs/reference/conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/conf.py b/docs/reference/conf.py
index ed778ab6..ebd5ecdd 100644
--- a/docs/reference/conf.py
+++ b/docs/reference/conf.py
@@ -24,7 +24,7 @@ author = 'Cryspen'
 
 # The full version, including alpha/beta/rc tags
-#release = ""
+release = "5.0.2"
 
 # -- General configuration ---------------------------------------------------

From 26aa6abda09cae9541e037d10579d3375d142e8b Mon Sep 17 00:00:00 2001
From: Aymeric Fromherz
Date: Tue, 18 Jun 2024 14:13:49 +0200
Subject: [PATCH 10/10] Unpin breathe version, force sphinx version compatible
 with sphinxcontrib.applehelp (#470)

---
 .github/workflows/gh-pages.yml  | 2 +-
 docs/reference/conf.py          | 2 +-
 docs/reference/requirements.txt | 4 +++-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index 8f023e77..705386ca 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -30,7 +30,7 @@ jobs:
 
       - name: Setup | System
         run: |
-          brew install doxygen sphinx-doc gmp ninja node
+          brew install doxygen gmp ninja node
          pip install --upgrade pip
 
       - name: Setup | OCaml | 1/2
diff --git a/docs/reference/conf.py b/docs/reference/conf.py
index ebd5ecdd..ed778ab6 100644
--- a/docs/reference/conf.py
+++ b/docs/reference/conf.py
@@ -24,7 +24,7 @@ author = 'Cryspen'
 
 # The full version, including alpha/beta/rc tags
-release = "5.0.2"
+#release = ""
 
 # -- General configuration ---------------------------------------------------
diff --git a/docs/reference/requirements.txt b/docs/reference/requirements.txt
index 1a76db7d..fec8b7a5 100644
--- a/docs/reference/requirements.txt
+++ b/docs/reference/requirements.txt
@@ -1,3 +1,5 @@
+sphinx >= 5.0.0
+
 myst-parser
 
 sphinx-multiversion
@@ -5,4 +7,4 @@ pydata-sphinx-theme
 sphinx-book-theme
 sphinx-tabs
 
-breathe==4.33.1
+breathe
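Closing note on [PATCH 07/10] above: the deleted js/api.json entry carried the only JavaScript-side Keccak-256 test vector in the repository. It is kept here as plain C data for reference; the hex strings are copied verbatim from the removed entry, and the identifiers are illustrative:

/* Keccak-256 (rate 1088, capacity 512, suffix 0x01) test vector from the
 * removed js/api.json "keccak" entry. The message is the ASCII bytes of
 * "Longtemps, je me suis couche de bonne heure". */
static const char keccak256_msg_hex[] =
  "4c6f6e6774656d70732c206a65206d65207375697320636f7563686520646520626f6e6e65206865757265";
static const char keccak256_digest_hex[] =
  "9f3afe7d35d9bbc4efd98252357e73e85ce1234a48603a063bb7079174aafa68";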