diff --git a/include/Hacl_Hash_SHA3_Scalar.h b/include/Hacl_Hash_SHA3_Scalar.h
index d0b7f253..63cf8710 100644
--- a/include/Hacl_Hash_SHA3_Scalar.h
+++ b/include/Hacl_Hash_SHA3_Scalar.h
@@ -71,15 +71,12 @@ Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(
 );
 
 void
-Hacl_Hash_SHA3_Scalar_shake128_absorb_last(
+Hacl_Hash_SHA3_Scalar_shake128_absorb_final(
   uint64_t *state,
   uint8_t *input,
   uint32_t inputByteLen
 );
 
-void
-Hacl_Hash_SHA3_Scalar_shake128_absorb(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
-
 void
 Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(
   uint64_t *state,
diff --git a/include/Hacl_Hash_SHA3_Simd256.h b/include/Hacl_Hash_SHA3_Simd256.h
index d231e273..25c1a166 100644
--- a/include/Hacl_Hash_SHA3_Simd256.h
+++ b/include/Hacl_Hash_SHA3_Simd256.h
@@ -153,7 +153,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(
 );
 
 void
-Hacl_Hash_SHA3_Simd256_shake128_absorb_last(
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
   Lib_IntVector_Intrinsics_vec256 *state,
   uint8_t *input0,
   uint8_t *input1,
diff --git a/include/msvc/Hacl_Hash_SHA3_Scalar.h b/include/msvc/Hacl_Hash_SHA3_Scalar.h
index d0b7f253..63cf8710 100644
--- a/include/msvc/Hacl_Hash_SHA3_Scalar.h
+++ b/include/msvc/Hacl_Hash_SHA3_Scalar.h
@@ -71,15 +71,12 @@ Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(
 );
 
 void
-Hacl_Hash_SHA3_Scalar_shake128_absorb_last(
+Hacl_Hash_SHA3_Scalar_shake128_absorb_final(
   uint64_t *state,
   uint8_t *input,
   uint32_t inputByteLen
 );
 
-void
-Hacl_Hash_SHA3_Scalar_shake128_absorb(uint64_t *state, uint8_t *input, uint32_t inputByteLen);
-
 void
 Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(
   uint64_t *state,
diff --git a/include/msvc/Hacl_Hash_SHA3_Simd256.h b/include/msvc/Hacl_Hash_SHA3_Simd256.h
index d231e273..25c1a166 100644
--- a/include/msvc/Hacl_Hash_SHA3_Simd256.h
+++ b/include/msvc/Hacl_Hash_SHA3_Simd256.h
@@ -153,7 +153,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(
 );
 
 void
-Hacl_Hash_SHA3_Simd256_shake128_absorb_last(
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
   Lib_IntVector_Intrinsics_vec256 *state,
   uint8_t *input0,
   uint8_t *input1,
diff --git a/src/Hacl_Hash_SHA3_Scalar.c b/src/Hacl_Hash_SHA3_Scalar.c
index 7393ebf2..6d6806a3 100644
--- a/src/Hacl_Hash_SHA3_Scalar.c
+++ b/src/Hacl_Hash_SHA3_Scalar.c
@@ -2526,7 +2526,7 @@ Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(
 }
 
 void
-Hacl_Hash_SHA3_Scalar_shake128_absorb_last(
+Hacl_Hash_SHA3_Scalar_shake128_absorb_final(
   uint64_t *state,
   uint8_t *input,
   uint32_t inputByteLen
@@ -2730,331 +2730,6 @@ Hacl_Hash_SHA3_Scalar_shake128_absorb_last(
   }
 }
 
-void
-Hacl_Hash_SHA3_Scalar_shake128_absorb(uint64_t *state, uint8_t *input, uint32_t inputByteLen)
-{
-  for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++)
-  {
-    uint8_t b1[256U] = { 0U };
-    uint8_t *b_ = b1;
-    uint8_t *b0 = input;
-    uint8_t *bl0 = b_;
-    memcpy(bl0, b0 + i0 * 168U, 168U * sizeof (uint8_t));
-    uint64_t ws[32U] = { 0U };
-    uint8_t *b = b_;
-    uint64_t u = load64_le(b);
-    ws[0U] = u;
-    uint64_t u0 = load64_le(b + 8U);
-    ws[1U] = u0;
-    uint64_t u1 = load64_le(b + 16U);
-    ws[2U] = u1;
-    uint64_t u2 = load64_le(b + 24U);
-    ws[3U] = u2;
-    uint64_t u3 = load64_le(b + 32U);
-    ws[4U] = u3;
-    uint64_t u4 = load64_le(b + 40U);
-    ws[5U] = u4;
-    uint64_t u5 = load64_le(b + 48U);
-    ws[6U] = u5;
-    uint64_t u6 = load64_le(b + 56U);
-    ws[7U] = u6;
-    uint64_t u7 = load64_le(b + 64U);
-    ws[8U] = u7;
-    uint64_t u8 = load64_le(b + 72U);
-    ws[9U] = u8;
-    uint64_t u9 = load64_le(b + 80U);
-    ws[10U] = u9;
-    uint64_t u10 = load64_le(b + 88U);
-    ws[11U] = u10;
-    uint64_t u11 = load64_le(b + 96U);
-    ws[12U] = u11;
-    uint64_t u12 = load64_le(b + 104U);
-    ws[13U] = u12;
-    uint64_t u13 = load64_le(b + 112U);
-    ws[14U] = u13;
-    uint64_t u14 = load64_le(b + 120U);
-    ws[15U] = u14;
-    uint64_t u15 = load64_le(b + 128U);
-    ws[16U] = u15;
-    uint64_t u16 = load64_le(b + 136U);
-    ws[17U] = u16;
-    uint64_t u17 = load64_le(b + 144U);
-    ws[18U] = u17;
-    uint64_t u18 = load64_le(b + 152U);
-    ws[19U] = u18;
-    uint64_t u19 = load64_le(b + 160U);
-    ws[20U] = u19;
-    uint64_t u20 = load64_le(b + 168U);
-    ws[21U] = u20;
-    uint64_t u21 = load64_le(b + 176U);
-    ws[22U] = u21;
-    uint64_t u22 = load64_le(b + 184U);
-    ws[23U] = u22;
-    uint64_t u23 = load64_le(b + 192U);
-    ws[24U] = u23;
-    uint64_t u24 = load64_le(b + 200U);
-    ws[25U] = u24;
-    uint64_t u25 = load64_le(b + 208U);
-    ws[26U] = u25;
-    uint64_t u26 = load64_le(b + 216U);
-    ws[27U] = u26;
-    uint64_t u27 = load64_le(b + 224U);
-    ws[28U] = u27;
-    uint64_t u28 = load64_le(b + 232U);
-    ws[29U] = u28;
-    uint64_t u29 = load64_le(b + 240U);
-    ws[30U] = u29;
-    uint64_t u30 = load64_le(b + 248U);
-    ws[31U] = u30;
-    for (uint32_t i = 0U; i < 25U; i++)
-    {
-      state[i] = state[i] ^ ws[i];
-    }
-    for (uint32_t i1 = 0U; i1 < 24U; i1++)
-    {
-      uint64_t _C[5U] = { 0U };
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        _C[i] =
-          state[i
-          + 0U]
-          ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U]))););
-      KRML_MAYBE_FOR5(i2,
-        0U,
-        5U,
-        1U,
-        uint64_t uu____0 = _C[(i2 + 1U) % 5U];
-        uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
-        KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;););
-      uint64_t x = state[1U];
-      uint64_t current = x;
-      for (uint32_t i = 0U; i < 24U; i++)
-      {
-        uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
-        uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
-        uint64_t temp = state[_Y];
-        uint64_t uu____1 = current;
-        state[_Y] = uu____1 << r | uu____1 >> (64U - r);
-        current = temp;
-      }
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]);
-        uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]);
-        uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]);
-        uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]);
-        uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]);
-        state[0U + 5U * i] = v0;
-        state[1U + 5U * i] = v1;
-        state[2U + 5U * i] = v2;
-        state[3U + 5U * i] = v3;
-        state[4U + 5U * i] = v4;);
-      uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1];
-      state[0U] = state[0U] ^ c;
-    }
-  }
-  uint32_t rem = inputByteLen % 168U;
-  uint8_t b2[256U] = { 0U };
-  uint8_t *b_ = b2;
-  uint32_t rem1 = inputByteLen % 168U;
-  uint8_t *b00 = input;
-  uint8_t *bl0 = b_;
-  memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
-  uint8_t *b01 = b_;
-  b01[rem] = 0x1FU;
-  uint64_t ws[32U] = { 0U };
-  uint8_t *b = b_;
-  uint64_t u0 = load64_le(b);
-  ws[0U] = u0;
-  uint64_t u1 = load64_le(b + 8U);
-  ws[1U] = u1;
-  uint64_t u2 = load64_le(b + 16U);
-  ws[2U] = u2;
-  uint64_t u3 = load64_le(b + 24U);
-  ws[3U] = u3;
-  uint64_t u4 = load64_le(b + 32U);
-  ws[4U] = u4;
-  uint64_t u5 = load64_le(b + 40U);
-  ws[5U] = u5;
-  uint64_t u6 = load64_le(b + 48U);
-  ws[6U] = u6;
-  uint64_t u7 = load64_le(b + 56U);
-  ws[7U] = u7;
-  uint64_t u8 = load64_le(b + 64U);
-  ws[8U] = u8;
-  uint64_t u9 = load64_le(b + 72U);
-  ws[9U] = u9;
-  uint64_t u10 = load64_le(b + 80U);
-  ws[10U] = u10;
-  uint64_t u11 = load64_le(b + 88U);
-  ws[11U] = u11;
-  uint64_t u12 = load64_le(b + 96U);
-  ws[12U] = u12;
-  uint64_t u13 = load64_le(b + 104U);
-  ws[13U] = u13;
-  uint64_t u14 = load64_le(b + 112U);
-  ws[14U] = u14;
-  uint64_t u15 = load64_le(b + 120U);
-  ws[15U] = u15;
-  uint64_t u16 = load64_le(b + 128U);
-  ws[16U] = u16;
-  uint64_t u17 = load64_le(b + 136U);
-  ws[17U] = u17;
-  uint64_t u18 = load64_le(b + 144U);
-  ws[18U] = u18;
-  uint64_t u19 = load64_le(b + 152U);
-  ws[19U] = u19;
-  uint64_t u20 = load64_le(b + 160U);
-  ws[20U] = u20;
-  uint64_t u21 = load64_le(b + 168U);
-  ws[21U] = u21;
-  uint64_t u22 = load64_le(b + 176U);
-  ws[22U] = u22;
-  uint64_t u23 = load64_le(b + 184U);
-  ws[23U] = u23;
-  uint64_t u24 = load64_le(b + 192U);
-  ws[24U] = u24;
-  uint64_t u25 = load64_le(b + 200U);
-  ws[25U] = u25;
-  uint64_t u26 = load64_le(b + 208U);
-  ws[26U] = u26;
-  uint64_t u27 = load64_le(b + 216U);
-  ws[27U] = u27;
-  uint64_t u28 = load64_le(b + 224U);
-  ws[28U] = u28;
-  uint64_t u29 = load64_le(b + 232U);
-  ws[29U] = u29;
-  uint64_t u30 = load64_le(b + 240U);
-  ws[30U] = u30;
-  uint64_t u31 = load64_le(b + 248U);
-  ws[31U] = u31;
-  for (uint32_t i = 0U; i < 25U; i++)
-  {
-    state[i] = state[i] ^ ws[i];
-  }
-  uint8_t b3[256U] = { 0U };
-  uint8_t *b4 = b3;
-  uint8_t *b0 = b4;
-  b0[167U] = 0x80U;
-  uint64_t ws0[32U] = { 0U };
-  uint8_t *b1 = b4;
-  uint64_t u = load64_le(b1);
-  ws0[0U] = u;
-  uint64_t u32 = load64_le(b1 + 8U);
-  ws0[1U] = u32;
-  uint64_t u33 = load64_le(b1 + 16U);
-  ws0[2U] = u33;
-  uint64_t u34 = load64_le(b1 + 24U);
-  ws0[3U] = u34;
-  uint64_t u35 = load64_le(b1 + 32U);
-  ws0[4U] = u35;
-  uint64_t u36 = load64_le(b1 + 40U);
-  ws0[5U] = u36;
-  uint64_t u37 = load64_le(b1 + 48U);
-  ws0[6U] = u37;
-  uint64_t u38 = load64_le(b1 + 56U);
-  ws0[7U] = u38;
-  uint64_t u39 = load64_le(b1 + 64U);
-  ws0[8U] = u39;
-  uint64_t u40 = load64_le(b1 + 72U);
-  ws0[9U] = u40;
-  uint64_t u41 = load64_le(b1 + 80U);
-  ws0[10U] = u41;
-  uint64_t u42 = load64_le(b1 + 88U);
-  ws0[11U] = u42;
-  uint64_t u43 = load64_le(b1 + 96U);
-  ws0[12U] = u43;
-  uint64_t u44 = load64_le(b1 + 104U);
-  ws0[13U] = u44;
-  uint64_t u45 = load64_le(b1 + 112U);
-  ws0[14U] = u45;
-  uint64_t u46 = load64_le(b1 + 120U);
-  ws0[15U] = u46;
-  uint64_t u47 = load64_le(b1 + 128U);
-  ws0[16U] = u47;
-  uint64_t u48 = load64_le(b1 + 136U);
-  ws0[17U] = u48;
-  uint64_t u49 = load64_le(b1 + 144U);
-  ws0[18U] = u49;
-  uint64_t u50 = load64_le(b1 + 152U);
-  ws0[19U] = u50;
-  uint64_t u51 = load64_le(b1 + 160U);
-  ws0[20U] = u51;
-  uint64_t u52 = load64_le(b1 + 168U);
-  ws0[21U] = u52;
-  uint64_t u53 = load64_le(b1 + 176U);
-  ws0[22U] = u53;
-  uint64_t u54 = load64_le(b1 + 184U);
-  ws0[23U] = u54;
-  uint64_t u55 = load64_le(b1 + 192U);
-  ws0[24U] = u55;
-  uint64_t u56 = load64_le(b1 + 200U);
-  ws0[25U] = u56;
-  uint64_t u57 = load64_le(b1 + 208U);
-  ws0[26U] = u57;
-  uint64_t u58 = load64_le(b1 + 216U);
-  ws0[27U] = u58;
-  uint64_t u59 = load64_le(b1 + 224U);
-  ws0[28U] = u59;
-  uint64_t u60 = load64_le(b1 + 232U);
-  ws0[29U] = u60;
-  uint64_t u61 = load64_le(b1 + 240U);
-  ws0[30U] = u61;
-  uint64_t u62 = load64_le(b1 + 248U);
-  ws0[31U] = u62;
-  for (uint32_t i = 0U; i < 25U; i++)
-  {
-    state[i] = state[i] ^ ws0[i];
-  }
-  for (uint32_t i0 = 0U; i0 < 24U; i0++)
-  {
-    uint64_t _C[5U] = { 0U };
-    KRML_MAYBE_FOR5(i,
-      0U,
-      5U,
-      1U,
-      _C[i] = state[i + 0U] ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U]))););
-    KRML_MAYBE_FOR5(i1,
-      0U,
-      5U,
-      1U,
-      uint64_t uu____2 = _C[(i1 + 1U) % 5U];
-      uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U);
-      KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i1 + 5U * i] = state[i1 + 5U * i] ^ _D;););
-    uint64_t x = state[1U];
-    uint64_t current = x;
-    for (uint32_t i = 0U; i < 24U; i++)
-    {
-      uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
-      uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
-      uint64_t temp = state[_Y];
-      uint64_t uu____3 = current;
-      state[_Y] = uu____3 << r | uu____3 >> (64U - r);
-      current = temp;
-    }
-    KRML_MAYBE_FOR5(i,
-      0U,
-      5U,
-      1U,
-      uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]);
-      uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]);
-      uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]);
-      uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]);
-      uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]);
-      state[0U + 5U * i] = v0;
-      state[1U + 5U * i] = v1;
-      state[2U + 5U * i] = v2;
-      state[3U + 5U * i] = v3;
-      state[4U + 5U * i] = v4;);
-    uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0];
-    state[0U] = state[0U] ^ c;
-  }
-}
-
 void
 Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(
   uint64_t *state,
diff --git a/src/Hacl_Hash_SHA3_Simd256.c b/src/Hacl_Hash_SHA3_Simd256.c
index 9748a375..9046f3db 100644
--- a/src/Hacl_Hash_SHA3_Simd256.c
+++ b/src/Hacl_Hash_SHA3_Simd256.c
@@ -10323,7 +10323,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(
 }
 
 void
-Hacl_Hash_SHA3_Simd256_shake128_absorb_last(
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
   Lib_IntVector_Intrinsics_vec256 *state,
   uint8_t *input0,
   uint8_t *input1,
diff --git a/src/msvc/Hacl_Hash_SHA3_Scalar.c b/src/msvc/Hacl_Hash_SHA3_Scalar.c
index 7393ebf2..6d6806a3 100644
--- a/src/msvc/Hacl_Hash_SHA3_Scalar.c
+++ b/src/msvc/Hacl_Hash_SHA3_Scalar.c
@@ -2526,7 +2526,7 @@ Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(
 }
 
 void
-Hacl_Hash_SHA3_Scalar_shake128_absorb_last(
+Hacl_Hash_SHA3_Scalar_shake128_absorb_final(
   uint64_t *state,
   uint8_t *input,
   uint32_t inputByteLen
@@ -2730,331 +2730,6 @@ Hacl_Hash_SHA3_Scalar_shake128_absorb_last(
   }
 }
 
-void
-Hacl_Hash_SHA3_Scalar_shake128_absorb(uint64_t *state, uint8_t *input, uint32_t inputByteLen)
-{
-  for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++)
-  {
-    uint8_t b1[256U] = { 0U };
-    uint8_t *b_ = b1;
-    uint8_t *b0 = input;
-    uint8_t *bl0 = b_;
-    memcpy(bl0, b0 + i0 * 168U, 168U * sizeof (uint8_t));
-    uint64_t ws[32U] = { 0U };
-    uint8_t *b = b_;
-    uint64_t u = load64_le(b);
-    ws[0U] = u;
-    uint64_t u0 = load64_le(b + 8U);
-    ws[1U] = u0;
-    uint64_t u1 = load64_le(b + 16U);
-    ws[2U] = u1;
-    uint64_t u2 = load64_le(b + 24U);
-    ws[3U] = u2;
-    uint64_t u3 = load64_le(b + 32U);
-    ws[4U] = u3;
-    uint64_t u4 = load64_le(b + 40U);
-    ws[5U] = u4;
-    uint64_t u5 = load64_le(b + 48U);
-    ws[6U] = u5;
-    uint64_t u6 = load64_le(b + 56U);
-    ws[7U] = u6;
-    uint64_t u7 = load64_le(b + 64U);
-    ws[8U] = u7;
-    uint64_t u8 = load64_le(b + 72U);
-    ws[9U] = u8;
-    uint64_t u9 = load64_le(b + 80U);
-    ws[10U] = u9;
-    uint64_t u10 = load64_le(b + 88U);
-    ws[11U] = u10;
-    uint64_t u11 = load64_le(b + 96U);
-    ws[12U] = u11;
-    uint64_t u12 = load64_le(b + 104U);
-    ws[13U] = u12;
-    uint64_t u13 = load64_le(b + 112U);
-    ws[14U] = u13;
-    uint64_t u14 = load64_le(b + 120U);
-    ws[15U] = u14;
-    uint64_t u15 = load64_le(b + 128U);
-    ws[16U] = u15;
-    uint64_t u16 = load64_le(b + 136U);
-    ws[17U] = u16;
-    uint64_t u17 = load64_le(b + 144U);
-    ws[18U] = u17;
-    uint64_t u18 = load64_le(b + 152U);
-    ws[19U] = u18;
-    uint64_t u19 = load64_le(b + 160U);
-    ws[20U] = u19;
-    uint64_t u20 = load64_le(b + 168U);
-    ws[21U] = u20;
-    uint64_t u21 = load64_le(b + 176U);
-    ws[22U] = u21;
-    uint64_t u22 = load64_le(b + 184U);
-    ws[23U] = u22;
-    uint64_t u23 = load64_le(b + 192U);
-    ws[24U] = u23;
-    uint64_t u24 = load64_le(b + 200U);
-    ws[25U] = u24;
-    uint64_t u25 = load64_le(b + 208U);
-    ws[26U] = u25;
-    uint64_t u26 = load64_le(b + 216U);
-    ws[27U] = u26;
-    uint64_t u27 = load64_le(b + 224U);
-    ws[28U] = u27;
-    uint64_t u28 = load64_le(b + 232U);
-    ws[29U] = u28;
-    uint64_t u29 = load64_le(b + 240U);
-    ws[30U] = u29;
-    uint64_t u30 = load64_le(b + 248U);
-    ws[31U] = u30;
-    for (uint32_t i = 0U; i < 25U; i++)
-    {
-      state[i] = state[i] ^ ws[i];
-    }
-    for (uint32_t i1 = 0U; i1 < 24U; i1++)
-    {
-      uint64_t _C[5U] = { 0U };
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        _C[i] =
-          state[i
-          + 0U]
-          ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U]))););
-      KRML_MAYBE_FOR5(i2,
-        0U,
-        5U,
-        1U,
-        uint64_t uu____0 = _C[(i2 + 1U) % 5U];
-        uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
-        KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;););
-      uint64_t x = state[1U];
-      uint64_t current = x;
-      for (uint32_t i = 0U; i < 24U; i++)
-      {
-        uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
-        uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
-        uint64_t temp = state[_Y];
-        uint64_t uu____1 = current;
-        state[_Y] = uu____1 << r | uu____1 >> (64U - r);
-        current = temp;
-      }
-      KRML_MAYBE_FOR5(i,
-        0U,
-        5U,
-        1U,
-        uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]);
-        uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]);
-        uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]);
-        uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]);
-        uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]);
-        state[0U + 5U * i] = v0;
-        state[1U + 5U * i] = v1;
-        state[2U + 5U * i] = v2;
-        state[3U + 5U * i] = v3;
-        state[4U + 5U * i] = v4;);
-      uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1];
-      state[0U] = state[0U] ^ c;
-    }
-  }
-  uint32_t rem = inputByteLen % 168U;
-  uint8_t b2[256U] = { 0U };
-  uint8_t *b_ = b2;
-  uint32_t rem1 = inputByteLen % 168U;
-  uint8_t *b00 = input;
-  uint8_t *bl0 = b_;
-  memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
-  uint8_t *b01 = b_;
-  b01[rem] = 0x1FU;
-  uint64_t ws[32U] = { 0U };
-  uint8_t *b = b_;
-  uint64_t u0 = load64_le(b);
-  ws[0U] = u0;
-  uint64_t u1 = load64_le(b + 8U);
-  ws[1U] = u1;
-  uint64_t u2 = load64_le(b + 16U);
-  ws[2U] = u2;
-  uint64_t u3 = load64_le(b + 24U);
-  ws[3U] = u3;
-  uint64_t u4 = load64_le(b + 32U);
-  ws[4U] = u4;
-  uint64_t u5 = load64_le(b + 40U);
-  ws[5U] = u5;
-  uint64_t u6 = load64_le(b + 48U);
-  ws[6U] = u6;
-  uint64_t u7 = load64_le(b + 56U);
-  ws[7U] = u7;
-  uint64_t u8 = load64_le(b + 64U);
-  ws[8U] = u8;
-  uint64_t u9 = load64_le(b + 72U);
-  ws[9U] = u9;
-  uint64_t u10 = load64_le(b + 80U);
-  ws[10U] = u10;
-  uint64_t u11 = load64_le(b + 88U);
-  ws[11U] = u11;
-  uint64_t u12 = load64_le(b + 96U);
-  ws[12U] = u12;
-  uint64_t u13 = load64_le(b + 104U);
-  ws[13U] = u13;
-  uint64_t u14 = load64_le(b + 112U);
-  ws[14U] = u14;
-  uint64_t u15 = load64_le(b + 120U);
-  ws[15U] = u15;
-  uint64_t u16 = load64_le(b + 128U);
-  ws[16U] = u16;
-  uint64_t u17 = load64_le(b + 136U);
-  ws[17U] = u17;
-  uint64_t u18 = load64_le(b + 144U);
-  ws[18U] = u18;
-  uint64_t u19 = load64_le(b + 152U);
-  ws[19U] = u19;
-  uint64_t u20 = load64_le(b + 160U);
-  ws[20U] = u20;
-  uint64_t u21 = load64_le(b + 168U);
-  ws[21U] = u21;
-  uint64_t u22 = load64_le(b + 176U);
-  ws[22U] = u22;
-  uint64_t u23 = load64_le(b + 184U);
-  ws[23U] = u23;
-  uint64_t u24 = load64_le(b + 192U);
-  ws[24U] = u24;
-  uint64_t u25 = load64_le(b + 200U);
-  ws[25U] = u25;
-  uint64_t u26 = load64_le(b + 208U);
-  ws[26U] = u26;
-  uint64_t u27 = load64_le(b + 216U);
-  ws[27U] = u27;
-  uint64_t u28 = load64_le(b + 224U);
-  ws[28U] = u28;
-  uint64_t u29 = load64_le(b + 232U);
-  ws[29U] = u29;
-  uint64_t u30 = load64_le(b + 240U);
-  ws[30U] = u30;
-  uint64_t u31 = load64_le(b + 248U);
-  ws[31U] = u31;
-  for (uint32_t i = 0U; i < 25U; i++)
-  {
-    state[i] = state[i] ^ ws[i];
-  }
-  uint8_t b3[256U] = { 0U };
-  uint8_t *b4 = b3;
-  uint8_t *b0 = b4;
-  b0[167U] = 0x80U;
-  uint64_t ws0[32U] = { 0U };
-  uint8_t *b1 = b4;
-  uint64_t u = load64_le(b1);
-  ws0[0U] = u;
-  uint64_t u32 = load64_le(b1 + 8U);
-  ws0[1U] = u32;
-  uint64_t u33 = load64_le(b1 + 16U);
-  ws0[2U] = u33;
-  uint64_t u34 = load64_le(b1 + 24U);
-  ws0[3U] = u34;
-  uint64_t u35 = load64_le(b1 + 32U);
-  ws0[4U] = u35;
-  uint64_t u36 = load64_le(b1 + 40U);
-  ws0[5U] = u36;
-  uint64_t u37 = load64_le(b1 + 48U);
-  ws0[6U] = u37;
-  uint64_t u38 = load64_le(b1 + 56U);
-  ws0[7U] = u38;
-  uint64_t u39 = load64_le(b1 + 64U);
-  ws0[8U] = u39;
-  uint64_t u40 = load64_le(b1 + 72U);
-  ws0[9U] = u40;
-  uint64_t u41 = load64_le(b1 + 80U);
-  ws0[10U] = u41;
-  uint64_t u42 = load64_le(b1 + 88U);
-  ws0[11U] = u42;
-  uint64_t u43 = load64_le(b1 + 96U);
-  ws0[12U] = u43;
-  uint64_t u44 = load64_le(b1 + 104U);
-  ws0[13U] = u44;
-  uint64_t u45 = load64_le(b1 + 112U);
-  ws0[14U] = u45;
-  uint64_t u46 = load64_le(b1 + 120U);
-  ws0[15U] = u46;
-  uint64_t u47 = load64_le(b1 + 128U);
-  ws0[16U] = u47;
-  uint64_t u48 = load64_le(b1 + 136U);
-  ws0[17U] = u48;
-  uint64_t u49 = load64_le(b1 + 144U);
-  ws0[18U] = u49;
-  uint64_t u50 = load64_le(b1 + 152U);
-  ws0[19U] = u50;
-  uint64_t u51 = load64_le(b1 + 160U);
-  ws0[20U] = u51;
-  uint64_t u52 = load64_le(b1 + 168U);
-  ws0[21U] = u52;
-  uint64_t u53 = load64_le(b1 + 176U);
-  ws0[22U] = u53;
-  uint64_t u54 = load64_le(b1 + 184U);
-  ws0[23U] = u54;
-  uint64_t u55 = load64_le(b1 + 192U);
-  ws0[24U] = u55;
-  uint64_t u56 = load64_le(b1 + 200U);
-  ws0[25U] = u56;
-  uint64_t u57 = load64_le(b1 + 208U);
-  ws0[26U] = u57;
-  uint64_t u58 = load64_le(b1 + 216U);
-  ws0[27U] = u58;
-  uint64_t u59 = load64_le(b1 + 224U);
-  ws0[28U] = u59;
-  uint64_t u60 = load64_le(b1 + 232U);
-  ws0[29U] = u60;
-  uint64_t u61 = load64_le(b1 + 240U);
-  ws0[30U] = u61;
-  uint64_t u62 = load64_le(b1 + 248U);
-  ws0[31U] = u62;
-  for (uint32_t i = 0U; i < 25U; i++)
-  {
-    state[i] = state[i] ^ ws0[i];
-  }
-  for (uint32_t i0 = 0U; i0 < 24U; i0++)
-  {
-    uint64_t _C[5U] = { 0U };
-    KRML_MAYBE_FOR5(i,
-      0U,
-      5U,
-      1U,
-      _C[i] = state[i + 0U] ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U]))););
-    KRML_MAYBE_FOR5(i1,
-      0U,
-      5U,
-      1U,
-      uint64_t uu____2 = _C[(i1 + 1U) % 5U];
-      uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U);
-      KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i1 + 5U * i] = state[i1 + 5U * i] ^ _D;););
-    uint64_t x = state[1U];
-    uint64_t current = x;
-    for (uint32_t i = 0U; i < 24U; i++)
-    {
-      uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
-      uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
-      uint64_t temp = state[_Y];
-      uint64_t uu____3 = current;
-      state[_Y] = uu____3 << r | uu____3 >> (64U - r);
-      current = temp;
-    }
-    KRML_MAYBE_FOR5(i,
-      0U,
-      5U,
-      1U,
-      uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]);
-      uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]);
-      uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]);
-      uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]);
-      uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]);
-      state[0U + 5U * i] = v0;
-      state[1U + 5U * i] = v1;
-      state[2U + 5U * i] = v2;
-      state[3U + 5U * i] = v3;
-      state[4U + 5U * i] = v4;);
-    uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0];
-    state[0U] = state[0U] ^ c;
-  }
-}
-
 void
 Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(
   uint64_t *state,
diff --git a/src/msvc/Hacl_Hash_SHA3_Simd256.c b/src/msvc/Hacl_Hash_SHA3_Simd256.c
index 9748a375..9046f3db 100644
--- a/src/msvc/Hacl_Hash_SHA3_Simd256.c
+++ b/src/msvc/Hacl_Hash_SHA3_Simd256.c
@@ -10323,7 +10323,7 @@ Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks(
 }
 
 void
-Hacl_Hash_SHA3_Simd256_shake128_absorb_last(
+Hacl_Hash_SHA3_Simd256_shake128_absorb_final(
   Lib_IntVector_Intrinsics_vec256 *state,
   uint8_t *input0,
   uint8_t *input1,
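Note for callers: the one-shot Hacl_Hash_SHA3_Scalar_shake128_absorb removed above is covered by the shake128_absorb_nblocks / shake128_absorb_final pair (the latter renamed from shake128_absorb_last in this change). The sketch below is illustrative only and not part of the patch; it assumes, as the removed function body suggests, that absorb_nblocks consumes the complete 168-byte blocks of `input`, that absorb_final pads and absorbs the trailing `inputByteLen % 168U` bytes of the same buffer, and that shake128_squeeze_nblocks takes (state, output, outputByteLen) with a block-multiple output length.

#include <stdint.h>
#include "Hacl_Hash_SHA3_Scalar.h"

/* Hypothetical helper, not part of this patch: one-shot SHAKE128 on top of
   the streaming scalar API that remains after this change. */
static void
shake128_one_shot(
  uint8_t *output,
  uint32_t outputByteLen,
  uint8_t *input,
  uint32_t inputByteLen
)
{
  /* The scalar Keccak state is 25 lanes of 64 bits, initially all zero. */
  uint64_t state[25U] = { 0U };
  /* Absorb the inputByteLen / 168U complete blocks of the input. */
  Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(state, input, inputByteLen);
  /* Pad and absorb the trailing inputByteLen % 168U bytes
     (shake128_absorb_final, formerly shake128_absorb_last). */
  Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state, input, inputByteLen);
  /* Squeeze the requested output, assumed here to be a multiple of 168 bytes,
     since only the block-wise squeezer is exposed. */
  Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(state, output, outputByteLen);
}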