experimental hashing with OxCaml
/*********************************************************************
* Filename:   sha256.c
* Author:     Brad Conte (brad AT bradconte.com)
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details:    Implementation of the SHA-256 hashing algorithm.
*             SHA-256 is one of the three algorithms in the SHA2
*             specification. The others, SHA-384 and SHA-512, are not
*             offered in this implementation.
*             Algorithm specification can be found here:
*             http://csrc.nist.gov/publications/fips/fips180-2/fips180-2withchangenotice.pdf
*             This implementation uses little endian byte order.
*********************************************************************/
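/* Typical single-shot use of the streaming API defined below (a sketch; it
 * assumes sha256.h declares SHA256_CTX and BYTE, and a 32-byte digest, as in
 * Brad Conte's original header):
 *
 *     SHA256_CTX ctx;
 *     BYTE hash[32];
 *     sha256_init(&ctx);
 *     sha256_update(&ctx, msg, msg_len);
 *     sha256_final(&ctx, hash);
 */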

/*************************** HEADER FILES ***************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>   /* memset/memcmp; the original <memory.h> is a non-standard alias */
#include <stdint.h>   /* uint8_t/uint32_t are used below, before the per-arch includes */
#include "sha256.h"

static const uint32_t K[] =
{
    0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
    0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
    0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
    0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
    0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
    0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
    0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
    0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
    0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
    0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
    0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
    0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
    0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
    0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
    0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
    0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2
};
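/* These round constants are the first 32 bits of the fractional parts of the
 * cube roots of the first 64 primes (FIPS 180-4, sec. 4.2.2). A quick way to
 * spot-check one of them (a sketch; double precision carries more than the 32
 * fractional bits needed here):
 *
 *     #include <math.h>
 *     uint32_t k0(void) {
 *         double ipart;
 *         return (uint32_t)(modf(cbrt(2.0), &ipart) * 4294967296.0);  // 0x428A2F98 == K[0]
 *     }
 */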

#if defined(__arm__) || defined(__aarch32__) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM)
// ============== ARM64 begin =======================
// Most ARM64 server cores implement the SHA-256 instructions, but support is
// not universal; see the runtime-check note below.
# if defined(__GNUC__)
#  include <stdint.h>
# endif
# if defined(__ARM_NEON) || defined(_MSC_VER) || defined(__GNUC__)
#  include <arm_neon.h>
# endif
/* GCC and LLVM Clang, but not Apple Clang */
# if defined(__GNUC__) && !defined(__apple_build_version__)
#  if defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRYPTO)
#   include <arm_acle.h>
#  endif
# endif
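/* Unlike the x86-64 path below, this ARM path uses the crypto instructions
 * unconditionally. On Linux/AArch64 one could guard it at runtime via the
 * hwcaps (a sketch, not wired into the dispatch here):
 *
 *     #include <sys/auxv.h>
 *     #include <asm/hwcap.h>
 *     int supports_sha2(void) { return (getauxval(AT_HWCAP) & HWCAP_SHA2) != 0; }
 */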
/* length is size_t to match the portable and x86-64 versions (and the single
   prototype in sha256.h); the original uint32_t here was a mismatch. */
void sha256_process(uint32_t state[8], const uint8_t data[], size_t length)
{
    uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
    uint32x4_t MSG0, MSG1, MSG2, MSG3;
    uint32x4_t TMP0, TMP1, TMP2;

    /* Load state */
    STATE0 = vld1q_u32(&state[0]);
    STATE1 = vld1q_u32(&state[4]);

    while (length >= 64)
    {
        /* Save state */
        ABEF_SAVE = STATE0;
        CDGH_SAVE = STATE1;

        /* Load message */
        MSG0 = vld1q_u32((const uint32_t *)(data + 0));
        MSG1 = vld1q_u32((const uint32_t *)(data + 16));
        MSG2 = vld1q_u32((const uint32_t *)(data + 32));
        MSG3 = vld1q_u32((const uint32_t *)(data + 48));

        /* Reverse for little endian */
        MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
        MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
        MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
        MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));

        TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x00]));

        /* Rounds 0-3 */
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x04]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        /* Rounds 4-7 */
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x08]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        /* Rounds 8-11 */
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x0c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        /* Rounds 12-15 */
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x10]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        /* Rounds 16-19 */
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x14]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        /* Rounds 20-23 */
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x18]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        /* Rounds 24-27 */
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x1c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        /* Rounds 28-31 */
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x20]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        /* Rounds 32-35 */
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x24]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        /* Rounds 36-39 */
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x28]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        /* Rounds 40-43 */
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x2c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        /* Rounds 44-47 */
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x30]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        /* Rounds 48-51 */
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x34]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

        /* Rounds 52-55 */
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x38]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

        /* Rounds 56-59 */
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x3c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

        /* Rounds 60-63 */
        TMP2 = STATE0;
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

        /* Combine state */
        STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
        STATE1 = vaddq_u32(STATE1, CDGH_SAVE);

        data += 64;
        length -= 64;
    }

    /* Save state */
    vst1q_u32(&state[0], STATE0);
    vst1q_u32(&state[4], STATE1);
}

// ============== ARM64 end =======================
#else
// ============== x86-64 begin =======================
/* Include the GCC super header */
#if defined(__GNUC__)
# include <stdint.h>
# include <x86intrin.h>
#endif

/* Microsoft supports the Intel SHA intrinsics as of Visual Studio 2015 */
#if defined(_MSC_VER)
# include <immintrin.h>
# define WIN32_LEAN_AND_MEAN
# include <Windows.h>
#endif

#define ROTATE(x,y) (((x)>>(y)) | ((x)<<(32-(y))))
#define Sigma0(x)   (ROTATE((x), 2) ^ ROTATE((x),13) ^ ROTATE((x),22))
#define Sigma1(x)   (ROTATE((x), 6) ^ ROTATE((x),11) ^ ROTATE((x),25))
#define sigma0(x)   (ROTATE((x), 7) ^ ROTATE((x),18) ^ ((x)>> 3))
#define sigma1(x)   (ROTATE((x),17) ^ ROTATE((x),19) ^ ((x)>>10))

#define Ch(x,y,z)   (((x) & (y)) ^ ((~(x)) & (z)))
#define Maj(x,y,z)  (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
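/* Quick compile-time sanity checks for the macros above (a sketch using C11
 * _Static_assert; harmless to drop when targeting older compilers). */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
_Static_assert(ROTATE(0x80000000u, 1) == 0x40000000u, "ROTATE is a right-rotate");
_Static_assert(Ch(0xFFFFFFFFu, 0x12345678u, 0x9ABCDEF0u) == 0x12345678u, "Ch selects y where x is set");
_Static_assert(Maj(0u, 0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFFu, "Maj is the bitwise majority");
#endif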

/* Widen before shifting to avoid undefined behavior: data[i] is a uint8_t,
   which promotes to (signed) int, and shifting a byte into the sign bit is UB.
   https://stackoverflow.com/q/29538935/608639 */
uint32_t B2U32(uint8_t val, uint8_t sh)
{
    return ((uint32_t)val) << sh;
}

void sha256_process_c(uint32_t state[8], const uint8_t data[], size_t length)
{
    uint32_t a, b, c, d, e, f, g, h, s0, s1, T1, T2;
    uint32_t X[16], i;

    size_t blocks = length / 64;
    while (blocks--)
    {
        a = state[0];
        b = state[1];
        c = state[2];
        d = state[3];
        e = state[4];
        f = state[5];
        g = state[6];
        h = state[7];

        for (i = 0; i < 16; i++)
        {
            X[i] = B2U32(data[0], 24) | B2U32(data[1], 16) | B2U32(data[2], 8) | B2U32(data[3], 0);
            data += 4;

            T1 = h;
            T1 += Sigma1(e);
            T1 += Ch(e, f, g);
            T1 += K[i];
            T1 += X[i];

            T2 = Sigma0(a);
            T2 += Maj(a, b, c);

            h = g;
            g = f;
            f = e;
            e = d + T1;
            d = c;
            c = b;
            b = a;
            a = T1 + T2;
        }

        for (; i < 64; i++)
        {
            s0 = X[(i + 1) & 0x0f];
            s0 = sigma0(s0);
            s1 = X[(i + 14) & 0x0f];
            s1 = sigma1(s1);

            T1 = X[i & 0xf] += s0 + s1 + X[(i + 9) & 0xf];
            T1 += h + Sigma1(e) + Ch(e, f, g) + K[i];
            T2 = Sigma0(a) + Maj(a, b, c);
            h = g;
            g = f;
            f = e;
            e = d + T1;
            d = c;
            c = b;
            b = a;
            a = T1 + T2;
        }

        state[0] += a;
        state[1] += b;
        state[2] += c;
        state[3] += d;
        state[4] += e;
        state[5] += f;
        state[6] += g;
        state[7] += h;
    }
}

/* Process multiple blocks. The caller is responsible for setting the initial
   state, and the caller is responsible for padding the final block. */
void sha256_process_asm(uint32_t state[8], const uint8_t data[], size_t length)
{
    __m128i STATE0, STATE1;
    __m128i MSG, TMP;
    __m128i MSG0, MSG1, MSG2, MSG3;
    __m128i ABEF_SAVE, CDGH_SAVE;
    const __m128i MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);

    /* Load initial values */
    TMP = _mm_loadu_si128((const __m128i*) &state[0]);
    STATE1 = _mm_loadu_si128((const __m128i*) &state[4]);

    TMP = _mm_shuffle_epi32(TMP, 0xB1);          /* CDAB */
    STATE1 = _mm_shuffle_epi32(STATE1, 0x1B);    /* EFGH */
    STATE0 = _mm_alignr_epi8(TMP, STATE1, 8);    /* ABEF */
    STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); /* CDGH */

    while (length >= 64)
    {
        /* Save current state */
        ABEF_SAVE = STATE0;
        CDGH_SAVE = STATE1;

        /* Rounds 0-3 */
        MSG = _mm_loadu_si128((const __m128i*) (data+0));
        MSG0 = _mm_shuffle_epi8(MSG, MASK);
        MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        /* Rounds 4-7 */
        MSG1 = _mm_loadu_si128((const __m128i*) (data+16));
        MSG1 = _mm_shuffle_epi8(MSG1, MASK);
        MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

        /* Rounds 8-11 */
        MSG2 = _mm_loadu_si128((const __m128i*) (data+32));
        MSG2 = _mm_shuffle_epi8(MSG2, MASK);
        MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

        /* Rounds 12-15 */
        MSG3 = _mm_loadu_si128((const __m128i*) (data+48));
        MSG3 = _mm_shuffle_epi8(MSG3, MASK);
        MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
        MSG0 = _mm_add_epi32(MSG0, TMP);
        MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

        /* Rounds 16-19 */
        MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
        MSG1 = _mm_add_epi32(MSG1, TMP);
        MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

        /* Rounds 20-23 */
        MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
        MSG2 = _mm_add_epi32(MSG2, TMP);
        MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

        /* Rounds 24-27 */
        MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
        MSG3 = _mm_add_epi32(MSG3, TMP);
        MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

        /* Rounds 28-31 */
        MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
        MSG0 = _mm_add_epi32(MSG0, TMP);
        MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

        /* Rounds 32-35 */
        MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
        MSG1 = _mm_add_epi32(MSG1, TMP);
        MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

        /* Rounds 36-39 */
        MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
        MSG2 = _mm_add_epi32(MSG2, TMP);
        MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

        /* Rounds 40-43 */
        MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
        MSG3 = _mm_add_epi32(MSG3, TMP);
        MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

        /* Rounds 44-47 */
        MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
        MSG0 = _mm_add_epi32(MSG0, TMP);
        MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

        /* Rounds 48-51 */
        MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
        MSG1 = _mm_add_epi32(MSG1, TMP);
        MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

        /* Rounds 52-55 */
        MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
        MSG2 = _mm_add_epi32(MSG2, TMP);
        MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        /* Rounds 56-59 */
        MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
        MSG3 = _mm_add_epi32(MSG3, TMP);
        MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        /* Rounds 60-63 */
        MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        /* Combine state */
        STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
        STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);

        data += 64;
        length -= 64;
    }

    TMP = _mm_shuffle_epi32(STATE0, 0x1B);       /* FEBA */
    STATE1 = _mm_shuffle_epi32(STATE1, 0xB1);    /* DCHG */
    STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); /* DCBA */
    STATE1 = _mm_alignr_epi8(STATE1, TMP, 8);    /* HGFE (the old "ABEF" comment was wrong) */

    /* Save state */
    _mm_storeu_si128((__m128i*) &state[0], STATE0);
    _mm_storeu_si128((__m128i*) &state[4], STATE1);
}

#if defined(__clang__) || defined(__GNUC__) || defined(__INTEL_COMPILER)

#include <cpuid.h>
int supports_sha_ni(void)
{
    unsigned int CPUInfo[4];
    __cpuid(0, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]);
    if (CPUInfo[0] < 7)
        return 0;

    __cpuid_count(7, 0, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]);
    return CPUInfo[1] & (1 << 29); /* EBX bit 29 = SHA extensions */
}

#else /* MSVC: __cpuid/__cpuidex take an int[4] and are declared in <intrin.h> */

#include <intrin.h>
int supports_sha_ni(void)
{
    int CPUInfo[4];
    __cpuid(CPUInfo, 0);
    if (CPUInfo[0] < 7)
        return 0;

    __cpuidex(CPUInfo, 7, 0);
    return CPUInfo[1] & (1 << 29); /* EBX bit 29 = SHA extensions */
}

#endif /* __clang__ || __GNUC__ || __INTEL_COMPILER */

void sha256_process(uint32_t state[8], const uint8_t data[], size_t length)
{
    /* Detect SHA-NI once and cache the result. */
    static int has_sha_ni = -1;
    if (has_sha_ni == -1) {
        has_sha_ni = supports_sha_ni();
    }

    if (has_sha_ni) {
        sha256_process_asm(state, data, length);
    } else {
        sha256_process_c(state, data, length);
    }
}
// ============== x86-64 end =======================
#endif

void sha256_init(SHA256_CTX *ctx)
{
    ctx->datalen = 0;
    ctx->bitlen = 0;
    /* Initial hash values: first 32 bits of the fractional parts of the
       square roots of the first 8 primes (FIPS 180-4, sec. 5.3.3). */
    ctx->state[0] = 0x6a09e667;
    ctx->state[1] = 0xbb67ae85;
    ctx->state[2] = 0x3c6ef372;
    ctx->state[3] = 0xa54ff53a;
    ctx->state[4] = 0x510e527f;
    ctx->state[5] = 0x9b05688c;
    ctx->state[6] = 0x1f83d9ab;
    ctx->state[7] = 0x5be0cd19;
}

/* NOTE: this streamlined update assumes the whole message arrives in a single
   call. It hashes the block-aligned prefix directly from `data`, overwrites
   bitlen, resets the buffer, and keeps only the tail for sha256_final; any
   data buffered by a previous call on the same ctx would be discarded. */
void sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len)
{
    size_t i;

    size_t rounded = 64 * (len / 64);
    if (rounded != 0) {
        sha256_process(ctx->state, data, rounded);
    }

    ctx->bitlen = rounded * 8;
    ctx->datalen = 0;
    for (i = rounded; i < len; ++i) {
        ctx->data[ctx->datalen] = data[i];
        ctx->datalen++;
    }
}

void sha256_final(SHA256_CTX *ctx, BYTE hash[])
{
    WORD i;

    i = ctx->datalen;

    // Pad whatever data is left in the buffer.
    if (ctx->datalen < 56) {
        ctx->data[i++] = 0x80;
        while (i < 56)
            ctx->data[i++] = 0x00;
    }
    else {
        ctx->data[i++] = 0x80;
        while (i < 64)
            ctx->data[i++] = 0x00;
        sha256_process(ctx->state, ctx->data, 64);
        memset(ctx->data, 0, 56);
    }

    // Append to the padding the total message's length in bits and transform.
    ctx->bitlen += ctx->datalen * 8;
    ctx->data[63] = ctx->bitlen;
    ctx->data[62] = ctx->bitlen >> 8;
    ctx->data[61] = ctx->bitlen >> 16;
    ctx->data[60] = ctx->bitlen >> 24;
    ctx->data[59] = ctx->bitlen >> 32;
    ctx->data[58] = ctx->bitlen >> 40;
    ctx->data[57] = ctx->bitlen >> 48;
    ctx->data[56] = ctx->bitlen >> 56;
    sha256_process(ctx->state, ctx->data, 64);

    // Since this implementation uses little endian byte ordering and SHA uses big endian,
    // reverse all the bytes when copying the final state to the output hash.
    for (i = 0; i < 4; ++i) {
        hash[i]      = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 4]  = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 8]  = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 20] = (ctx->state[5] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 24] = (ctx->state[6] >> (24 - i * 8)) & 0x000000ff;
        hash[i + 28] = (ctx->state[7] >> (24 - i * 8)) & 0x000000ff;
    }
}