Mirror of https://github.com/python/cpython.git (synced 2024-11-28 08:20:55 +01:00)
Commit fcadc7e405: Replaces our fallback non-OpenSSL MD5 and SHA1 implementations with those from HACL* as we've already done with SHA2.

/* MIT License
 *
 * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
 * Copyright (c) 2022-2023 HACL* Contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "internal/Hacl_Hash_SHA1.h"
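
/* Initial SHA-1 hash values H0..H4 (FIPS 180-4, section 5.3.1). */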
static uint32_t
_h0[5U] =
  {
    (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
    (uint32_t)0xc3d2e1f0U
  };
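
/* Reset the five-word SHA-1 state to the initial values in _h0. */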
void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
{
  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
}
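
/*
 * Core SHA-1 compression function: absorbs one 64-byte block `l` into the
 * five-word state `h`.  It first expands the block into the 80-entry message
 * schedule _w (W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) for t >= 16),
 * then runs the 80 rounds with the stage-dependent boolean function and round
 * constant, and finally adds the working variables back into the incoming
 * state (the Davies-Meyer feed-forward).
 */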
static void legacy_update(uint32_t *h, uint8_t *l)
{
  uint32_t ha = h[0U];
  uint32_t hb = h[1U];
  uint32_t hc = h[2U];
  uint32_t hd = h[3U];
  uint32_t he = h[4U];
  uint32_t _w[80U] = { 0U };
  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
  {
    uint32_t v;
    if (i < (uint32_t)16U)
    {
      uint8_t *b = l + i * (uint32_t)4U;
      uint32_t u = load32_be(b);
      v = u;
    }
    else
    {
      uint32_t wmit3 = _w[i - (uint32_t)3U];
      uint32_t wmit8 = _w[i - (uint32_t)8U];
      uint32_t wmit14 = _w[i - (uint32_t)14U];
      uint32_t wmit16 = _w[i - (uint32_t)16U];
      v =
        (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) << (uint32_t)1U
        | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
    }
    _w[i] = v;
  }
  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
  {
    uint32_t _a = h[0U];
    uint32_t _b = h[1U];
    uint32_t _c = h[2U];
    uint32_t _d = h[3U];
    uint32_t _e = h[4U];
    uint32_t wmit = _w[i];
    uint32_t ite0;
    if (i < (uint32_t)20U)
    {
      ite0 = (_b & _c) ^ (~_b & _d);
    }
    else if ((uint32_t)39U < i && i < (uint32_t)60U)
    {
      ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
    }
    else
    {
      ite0 = _b ^ (_c ^ _d);
    }
    uint32_t ite;
    if (i < (uint32_t)20U)
    {
      ite = (uint32_t)0x5a827999U;
    }
    else if (i < (uint32_t)40U)
    {
      ite = (uint32_t)0x6ed9eba1U;
    }
    else if (i < (uint32_t)60U)
    {
      ite = (uint32_t)0x8f1bbcdcU;
    }
    else
    {
      ite = (uint32_t)0xca62c1d6U;
    }
    uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
    h[0U] = _T;
    h[1U] = _a;
    h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
    h[3U] = _c;
    h[4U] = _d;
  }
  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
  {
    _w[i] = (uint32_t)0U;
  }
  uint32_t sta = h[0U];
  uint32_t stb = h[1U];
  uint32_t stc = h[2U];
  uint32_t std = h[3U];
  uint32_t ste = h[4U];
  h[0U] = sta + ha;
  h[1U] = stb + hb;
  h[2U] = stc + hc;
  h[3U] = std + hd;
  h[4U] = ste + he;
}
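
/*
 * Write Merkle-Damgaard padding for a message of `len` bytes into `dst`:
 * a single 0x80 byte, then enough zero bytes to reach an offset of 56 mod 64,
 * then the message length in bits as a 64-bit big-endian integer.
 */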
static void legacy_pad(uint64_t len, uint8_t *dst)
{
  uint8_t *dst1 = dst;
  dst1[0U] = (uint8_t)0x80U;
  uint8_t *dst2 = dst + (uint32_t)1U;
  for (uint32_t i = (uint32_t)0U;
    i < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
    i++)
  {
    dst2[i] = (uint8_t)0U;
  }
  uint8_t *dst3 =
    dst + (uint32_t)1U
    + ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
  store64_be(dst3, len << (uint32_t)3U);
}
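
/* Serialize the five 32-bit state words big-endian into the 20-byte digest. */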
void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
{
  KRML_MAYBE_FOR5(i,
    (uint32_t)0U,
    (uint32_t)5U,
    (uint32_t)1U,
    store32_be(dst + i * (uint32_t)4U, s[i]););
}
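
/* Absorb `n_blocks` consecutive 64-byte blocks from `blocks` into the state `s`. */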
void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
{
  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
  {
    uint32_t sz = (uint32_t)64U;
    uint8_t *block = blocks + sz * i;
    legacy_update(s, block);
  }
}
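
/*
 * Absorb the final bytes of the message together with the padding for a total
 * length of `prev_len + input_len` bytes.  A 128-byte scratch buffer is used
 * because the remainder plus padding can span two 64-byte blocks.
 */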
void
Hacl_Hash_SHA1_legacy_update_last(
  uint32_t *s,
  uint64_t prev_len,
  uint8_t *input,
  uint32_t input_len
)
{
  uint32_t blocks_n = input_len / (uint32_t)64U;
  uint32_t blocks_len = blocks_n * (uint32_t)64U;
  uint8_t *blocks = input;
  uint32_t rest_len = input_len - blocks_len;
  uint8_t *rest = input + blocks_len;
  Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
  uint64_t total_input_len = prev_len + (uint64_t)input_len;
  uint32_t pad_len =
    (uint32_t)1U
    + ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U
    + (uint32_t)8U;
  uint32_t tmp_len = rest_len + pad_len;
  uint8_t tmp_twoblocks[128U] = { 0U };
  uint8_t *tmp = tmp_twoblocks;
  uint8_t *tmp_rest = tmp;
  uint8_t *tmp_pad = tmp + rest_len;
  memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
  legacy_pad(total_input_len, tmp_pad);
  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
}
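
/*
 * One-shot SHA-1: hash `input_len` bytes from `input` and write the 20-byte
 * digest to `dst`.  The block split below keeps at least the last (possibly
 * full) block for update_last so that padding is always applied.
 *
 * Illustrative call (not part of the generated sources; `msg` and `msg_len`
 * are placeholders):
 *
 *   uint8_t digest[20U];
 *   Hacl_Hash_SHA1_legacy_hash(msg, msg_len, digest);
 */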
void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
{
  uint32_t
  s[5U] =
    {
      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
      (uint32_t)0xc3d2e1f0U
    };
  uint32_t blocks_n0 = input_len / (uint32_t)64U;
  uint32_t blocks_n1;
  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
  {
    blocks_n1 = blocks_n0 - (uint32_t)1U;
  }
  else
  {
    blocks_n1 = blocks_n0;
  }
  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
  uint8_t *blocks0 = input;
  uint32_t rest_len0 = input_len - blocks_len0;
  uint8_t *rest0 = input + blocks_len0;
  uint32_t blocks_n = blocks_n1;
  uint32_t blocks_len = blocks_len0;
  uint8_t *blocks = blocks0;
  uint32_t rest_len = rest_len0;
  uint8_t *rest = rest0;
  Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
}
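
/*
 * Allocate a fresh streaming state: a 64-byte internal buffer, a five-word
 * block state initialized to the SHA-1 IV, and total_len = 0.  The caller
 * releases it with Hacl_Streaming_SHA1_legacy_free.
 */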
Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
{
  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
  Hacl_Streaming_MD_state_32
  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
  Hacl_Streaming_MD_state_32
  *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
  p[0U] = s;
  Hacl_Hash_Core_SHA1_legacy_init(block_state);
  return p;
}
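
/* Re-initialize an existing streaming state so it can be reused for a new hash. */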
void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
{
  Hacl_Streaming_MD_state_32 scrut = *s;
  uint8_t *buf = scrut.buf;
  uint32_t *block_state = scrut.block_state;
  Hacl_Hash_Core_SHA1_legacy_init(block_state);
  Hacl_Streaming_MD_state_32
  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
  s[0U] = tmp;
}
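
/*
 * The update function below keeps total_len % 64 pending bytes in `buf`
 * (a completely filled buffer is represented as 64 rather than 0 once data
 * has been seen).  Incoming data is handled in broadly three cases: it fits
 * in the buffer's free space and is simply appended; nothing is buffered yet,
 * so whole 64-byte blocks go straight to update_multi and only the tail is
 * buffered; or the buffer is first topped up to a complete block, flushed,
 * and the remaining bytes are then processed as in the previous case.
 *
 * Caller-side sketch of the streaming API (illustrative only; `part1`,
 * `part2` and their lengths are placeholders):
 *
 *   Hacl_Streaming_MD_state_32 *st = Hacl_Streaming_SHA1_legacy_create_in();
 *   (void)Hacl_Streaming_SHA1_legacy_update(st, part1, part1_len);
 *   (void)Hacl_Streaming_SHA1_legacy_update(st, part2, part2_len);
 *   uint8_t digest[20U];
 *   Hacl_Streaming_SHA1_legacy_finish(st, digest);
 *   Hacl_Streaming_SHA1_legacy_free(st);
 */
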
/**
0 = success, 1 = max length exceeded (the total input is limited to 2^61 - 1 bytes)
*/
uint32_t
Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
{
  Hacl_Streaming_MD_state_32 s = *p;
  uint64_t total_len = s.total_len;
  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
  {
    return (uint32_t)1U;
  }
  uint32_t sz;
  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
  {
    sz = (uint32_t)64U;
  }
  else
  {
    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
  }
  if (len <= (uint32_t)64U - sz)
  {
    Hacl_Streaming_MD_state_32 s1 = *p;
    uint32_t *block_state1 = s1.block_state;
    uint8_t *buf = s1.buf;
    uint64_t total_len1 = s1.total_len;
    uint32_t sz1;
    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
    {
      sz1 = (uint32_t)64U;
    }
    else
    {
      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
    }
    uint8_t *buf2 = buf + sz1;
    memcpy(buf2, data, len * sizeof (uint8_t));
    uint64_t total_len2 = total_len1 + (uint64_t)len;
    *p =
      (
        (Hacl_Streaming_MD_state_32){
          .block_state = block_state1,
          .buf = buf,
          .total_len = total_len2
        }
      );
  }
  else if (sz == (uint32_t)0U)
  {
    Hacl_Streaming_MD_state_32 s1 = *p;
    uint32_t *block_state1 = s1.block_state;
    uint8_t *buf = s1.buf;
    uint64_t total_len1 = s1.total_len;
    uint32_t sz1;
    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
    {
      sz1 = (uint32_t)64U;
    }
    else
    {
      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
    }
    if (!(sz1 == (uint32_t)0U))
    {
      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
    }
    uint32_t ite;
    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
    {
      ite = (uint32_t)64U;
    }
    else
    {
      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
    }
    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
    uint32_t data1_len = n_blocks * (uint32_t)64U;
    uint32_t data2_len = len - data1_len;
    uint8_t *data1 = data;
    uint8_t *data2 = data + data1_len;
    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
    uint8_t *dst = buf;
    memcpy(dst, data2, data2_len * sizeof (uint8_t));
    *p =
      (
        (Hacl_Streaming_MD_state_32){
          .block_state = block_state1,
          .buf = buf,
          .total_len = total_len1 + (uint64_t)len
        }
      );
  }
  else
  {
    uint32_t diff = (uint32_t)64U - sz;
    uint8_t *data1 = data;
    uint8_t *data2 = data + diff;
    Hacl_Streaming_MD_state_32 s1 = *p;
    uint32_t *block_state10 = s1.block_state;
    uint8_t *buf0 = s1.buf;
    uint64_t total_len10 = s1.total_len;
    uint32_t sz10;
    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
    {
      sz10 = (uint32_t)64U;
    }
    else
    {
      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
    }
    uint8_t *buf2 = buf0 + sz10;
    memcpy(buf2, data1, diff * sizeof (uint8_t));
    uint64_t total_len2 = total_len10 + (uint64_t)diff;
    *p =
      (
        (Hacl_Streaming_MD_state_32){
          .block_state = block_state10,
          .buf = buf0,
          .total_len = total_len2
        }
      );
    Hacl_Streaming_MD_state_32 s10 = *p;
    uint32_t *block_state1 = s10.block_state;
    uint8_t *buf = s10.buf;
    uint64_t total_len1 = s10.total_len;
    uint32_t sz1;
    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
    {
      sz1 = (uint32_t)64U;
    }
    else
    {
      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
    }
    if (!(sz1 == (uint32_t)0U))
    {
      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
    }
    uint32_t ite;
    if
    (
      (uint64_t)(len - diff) % (uint64_t)(uint32_t)64U == (uint64_t)0U
      && (uint64_t)(len - diff) > (uint64_t)0U
    )
    {
      ite = (uint32_t)64U;
    }
    else
    {
      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
    }
    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
    uint32_t data1_len = n_blocks * (uint32_t)64U;
    uint32_t data2_len = len - diff - data1_len;
    uint8_t *data11 = data2;
    uint8_t *data21 = data2 + data1_len;
    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
    uint8_t *dst = buf;
    memcpy(dst, data21, data2_len * sizeof (uint8_t));
    *p =
      (
        (Hacl_Streaming_MD_state_32){
          .block_state = block_state1,
          .buf = buf,
          .total_len = total_len1 + (uint64_t)(len - diff)
        }
      );
  }
  return (uint32_t)0U;
}
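
/*
 * Produce the digest for everything absorbed so far.  The block state is
 * copied into a temporary, so finishing is non-destructive: the caller can
 * keep feeding data to the same state afterwards.
 */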
void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
{
  Hacl_Streaming_MD_state_32 scrut = *p;
  uint32_t *block_state = scrut.block_state;
  uint8_t *buf_ = scrut.buf;
  uint64_t total_len = scrut.total_len;
  uint32_t r;
  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
  {
    r = (uint32_t)64U;
  }
  else
  {
    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
  }
  uint8_t *buf_1 = buf_;
  uint32_t tmp_block_state[5U] = { 0U };
  memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
  uint32_t ite;
  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
  {
    ite = (uint32_t)64U;
  }
  else
  {
    ite = r % (uint32_t)64U;
  }
  uint8_t *buf_last = buf_1 + r - ite;
  uint8_t *buf_multi = buf_1;
  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
  uint64_t prev_len_last = total_len - (uint64_t)r;
  Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
  Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
}
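
/* Release the buffer, the block state, and the state object itself. */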
void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s)
{
  Hacl_Streaming_MD_state_32 scrut = *s;
  uint8_t *buf = scrut.buf;
  uint32_t *block_state = scrut.block_state;
  KRML_HOST_FREE(block_state);
  KRML_HOST_FREE(buf);
  KRML_HOST_FREE(s);
}
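
/* Deep-copy a streaming state: duplicates both the buffer and the block state. */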
Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
{
  Hacl_Streaming_MD_state_32 scrut = *s0;
  uint32_t *block_state0 = scrut.block_state;
  uint8_t *buf0 = scrut.buf;
  uint64_t total_len0 = scrut.total_len;
  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
  memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
  Hacl_Streaming_MD_state_32
  s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
  Hacl_Streaming_MD_state_32
  *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
  p[0U] = s;
  return p;
}
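
/* Convenience alias for the one-shot Hacl_Hash_SHA1_legacy_hash above. */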
void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
{
  Hacl_Hash_SHA1_legacy_hash(input, input_len, dst);
}