GitHub Repository: tpruvot/cpuminer-multi
Path: blob/linux/yescrypt/yescrypt-simd.c
/*-
 * Copyright 2009 Colin Percival
 * Copyright 2012-2014 Alexander Peslyak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This file was originally written by Colin Percival as part of the Tarsnap
 * online backup system.
 */

/*
 * On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
 * gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
 * and XOP are of further help either way.
 */
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif

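/*
 * Build-flag note (an illustration, not taken from this repository's build
 * scripts): any one of the usual GCC/Clang ISA options satisfies the check
 * above, e.g.:
 *
 *   gcc -O2 -msse4.1 -c yescrypt-simd.c
 *   gcc -O2 -mavx    -c yescrypt-simd.c
 *   gcc -O2 -mxop    -c yescrypt-simd.c
 *
 * or -march=native on a CPU that supports one of these extensions.
 */
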
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "sha256_Y.h"
#include "sysendian.h"

#include "yescrypt.h"
#include "yescrypt-platform.h"

#include "compat.h"

#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif

#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */

#ifdef __XOP__
#define ARX(out, in1, in2, s) \
    out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
    { \
        __m128i T = _mm_add_epi32(in1, in2); \
        out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
        out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
    }
#endif

#define SALSA20_2ROUNDS \
    /* Operate on "columns" */ \
    ARX(X1, X0, X3, 7) \
    ARX(X2, X1, X0, 9) \
    ARX(X3, X2, X1, 13) \
    ARX(X0, X3, X2, 18) \
\
    /* Rearrange data */ \
    X1 = _mm_shuffle_epi32(X1, 0x93); \
    X2 = _mm_shuffle_epi32(X2, 0x4E); \
    X3 = _mm_shuffle_epi32(X3, 0x39); \
\
    /* Operate on "rows" */ \
    ARX(X3, X0, X1, 7) \
    ARX(X2, X3, X0, 9) \
    ARX(X1, X2, X3, 13) \
    ARX(X0, X1, X2, 18) \
\
    /* Rearrange data */ \
    X1 = _mm_shuffle_epi32(X1, 0x39); \
    X2 = _mm_shuffle_epi32(X2, 0x4E); \
    X3 = _mm_shuffle_epi32(X3, 0x93);

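/*
 * Note on the shuffle immediates above: 0x93, 0x4E and 0x39 are
 * _MM_SHUFFLE(2,1,0,3), _MM_SHUFFLE(1,0,3,2) and _MM_SHUFFLE(0,3,2,1),
 * i.e. rotations of the four 32-bit lanes by one, two and three positions,
 * which realign the state between the "columns" and "rows" half-rounds.
 */
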
/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3).
 */
#define SALSA20_8_BASE(maybe_decl, out) \
    { \
        maybe_decl Y0 = X0; \
        maybe_decl Y1 = X1; \
        maybe_decl Y2 = X2; \
        maybe_decl Y3 = X3; \
        SALSA20_2ROUNDS \
        SALSA20_2ROUNDS \
        SALSA20_2ROUNDS \
        SALSA20_2ROUNDS \
        (out)[0] = X0 = _mm_add_epi32(X0, Y0); \
        (out)[1] = X1 = _mm_add_epi32(X1, Y1); \
        (out)[2] = X2 = _mm_add_epi32(X2, Y2); \
        (out)[3] = X3 = _mm_add_epi32(X3, Y3); \
    }
#define SALSA20_8(out) \
    SALSA20_8_BASE(__m128i, out)

/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
 */
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
    X0 = _mm_xor_si128(X0, Z0); \
    X1 = _mm_xor_si128(X1, Z1); \
    X2 = _mm_xor_si128(X2, Z2); \
    X3 = _mm_xor_si128(X3, Z3); \
    SALSA20_8_BASE(maybe_decl, out)

#define SALSA20_8_XOR_MEM(in, out) \
    SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)

#define SALSA20_8_XOR_REG(out) \
    SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)

typedef union {
    uint32_t w[16];
    __m128i q[4];
} salsa20_blk_t;
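/*
 * One 64-byte Salsa20 block, viewable either as 16 32-bit words (w) or as
 * four SSE registers (q); the __m128i member gives the union 16-byte
 * alignment.
 */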

/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
    __m128i X0, X1, X2, X3;
    size_t i;

    r--;
    PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin[i * 2], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
    }
    PREFETCH(&Bin[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    X0 = Bin[r * 2 + 1].q[0];
    X1 = Bin[r * 2 + 1].q[1];
    X2 = Bin[r * 2 + 1].q[2];
    X3 = Bin[r * 2 + 1].q[3];

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}

/*
 * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
 * starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
 * destination registers, whereas the shifts would require an extra move
 * instruction for our code when building without AVX. Unfortunately, PSHUFD
 * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
 * and somewhat slower on some non-Intel CPUs (luckily not including AMD
 * Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
 * win in terms of throughput and/or not needing a move instruction, we
 * currently use it despite the higher latency on some older CPUs. As an
 * alternative, the #if below may be patched to only enable use of (V)PSHUFD
 * when building with SSE4.1 or newer, which is not available on older CPUs
 * where this instruction has higher latency.
 */
#if 1
#define HI32(X) \
    _mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
    _mm_srli_si128((X), 4)
#else
#define HI32(X) \
    _mm_srli_epi64((X), 32)
#endif

#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
    ((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif

/* This is tunable */
#define S_BITS 8

/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4

/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2

/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)

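/*
 * With the defaults above (S_BITS = 8, S_SIMD = 2, S_N = 2) these work out to:
 *   S_SIZE1    = 256                    (entries per S-box)
 *   S_MASK     = 255 * 2 * 8 = 0xff0    (byte-offset mask, 16-byte granular)
 *   S_MASK2    = 0x00000ff000000ff0     (S_MASK in both 32-bit halves)
 *   S_SIZE_ALL = 2 * 256 * 2 * 8 = 8192 (total S-box bytes per instance)
 */
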
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
    x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
    s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
    s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
    X = _mm_mul_epu32(HI32(X), X); \
    X = _mm_add_epi64(X, s0); \
    X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
    x = EXTRACT64(X) & S_MASK2; \
    s0 = *(const __m128i *)(S0 + (uint32_t)x); \
    s1 = *(const __m128i *)(S1 + (x >> 32)); \
    X = _mm_mul_epu32(HI32(X), X); \
    X = _mm_add_epi64(X, s0); \
    X = _mm_xor_si128(X, s1);
#endif

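/*
 * One PWXFORM_SIMD step above: the two 32-bit halves of the register's low
 * 64-bit word, masked with S_MASK2, select one 16-byte entry from S0 and one
 * from S1; then, independently in each 64-bit half of the register, the high
 * and low 32-bit words are multiplied (hi * lo), the S0 entry is added, and
 * the S1 entry is XORed in. PWXFORM_ROUND below applies this to four
 * registers, and PWXFORM runs six such rounds.
 */
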
#define PWXFORM_ROUND \
    PWXFORM_SIMD(X0, x0, s00, s01) \
    PWXFORM_SIMD(X1, x1, s10, s11) \
    PWXFORM_SIMD(X2, x2, s20, s21) \
    PWXFORM_SIMD(X3, x3, s30, s31)

#define PWXFORM \
    { \
        PWXFORM_X_T x0, x1, x2, x3; \
        __m128i s00, s01, s10, s11, s20, s21, s30, s31; \
        PWXFORM_ROUND PWXFORM_ROUND \
        PWXFORM_ROUND PWXFORM_ROUND \
        PWXFORM_ROUND PWXFORM_ROUND \
    }

#define XOR4(in) \
    X0 = _mm_xor_si128(X0, (in)[0]); \
    X1 = _mm_xor_si128(X1, (in)[1]); \
    X2 = _mm_xor_si128(X2, (in)[2]); \
    X3 = _mm_xor_si128(X3, (in)[3]);

#define XOUT(out) \
    (out)[0] = X0; \
    (out)[1] = X1; \
    (out)[2] = X2; \
    (out)[3] = X3;

/**
 * blockmix_pwxform(Bin, Bout, r, S):
 * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3;
    size_t i;

    if (!S) {
        blockmix_salsa8(Bin, Bout, r);
        return;
    }

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;

    r--;
    PREFETCH(&Bin[r], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

    /* X <-- B_{r1 - 1} */
    X0 = Bin[r].q[0];
    X1 = Bin[r].q[1];
    X2 = Bin[r].q[2];
    X3 = Bin[r].q[3];

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* X <-- H'(X \xor B_i) */
        XOR4(Bin[i].q)
        PWXFORM
        /* B'_i <-- X */
        XOUT(Bout[i].q)
    }

    /* Last iteration of the loop above */
    XOR4(Bin[i].q)
    PWXFORM

    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)
}

#define XOR4_2(in1, in2) \
    X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
    X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
    X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
    X3 = _mm_xor_si128((in1)[3], (in2)[3]);

static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
    __m128i X0, X1, X2, X3;
    size_t i;

    r--;
    if (Bin2_in_ROM) {
        PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
        PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
            PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
            PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
        }
        PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    } else {
        PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
        PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
            PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
        }
        PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    }
    PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[0].q)
    SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2 + 1].q)
        SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2].q)
        SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[r * 2 + 1].q)
    SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

    return _mm_cvtsi128_si32(X0);
}

static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3;
    size_t i;

    if (!S)
        return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;

    r--;
    if (Bin2_in_ROM) {
        PREFETCH(&Bin2[r], _MM_HINT_NTA)
        PREFETCH(&Bin1[r], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i], _MM_HINT_NTA)
            PREFETCH(&Bin1[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        }
    } else {
        PREFETCH(&Bin2[r], _MM_HINT_T0)
        PREFETCH(&Bin1[r], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i], _MM_HINT_T0)
            PREFETCH(&Bin1[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        }
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

    /* X <-- B_{r1 - 1} */
    XOR4_2(Bin1[r].q, Bin2[r].q)

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* X <-- H'(X \xor B_i) */
        XOR4(Bin1[i].q)
        XOR4(Bin2[i].q)
        PWXFORM
        /* B'_i <-- X */
        XOUT(Bout[i].q)
    }

    /* Last iteration of the loop above */
    XOR4(Bin1[i].q)
    XOR4(Bin2[i].q)
    PWXFORM

    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)

    return _mm_cvtsi128_si32(X0);
}

#undef XOR4
#define XOR4(in, out) \
    (out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
    (out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
    (out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
    (out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);

static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
    __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
    size_t i;

    r--;
    PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
    PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
        PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
        PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
        PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
    }
    PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[0].q, Bin2[0].q)
    SALSA20_8_XOR_REG(Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
        SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

        i++;

        /* 3: X <-- H(X \xor B_i) */
        /* 4: Y_i <-- X */
        /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
        XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
        SALSA20_8_XOR_REG(Bout[i].q)
    }

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
    XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
    SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

    return _mm_cvtsi128_si32(X0);
}

#define XOR4_Y \
    X0 = _mm_xor_si128(X0, Y0); \
    X1 = _mm_xor_si128(X1, Y1); \
    X2 = _mm_xor_si128(X2, Y2); \
    X3 = _mm_xor_si128(X3, Y3);

static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
    size_t i;

    if (!S)
        return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;

    r--;
    PREFETCH(&Bin2[r], _MM_HINT_T0)
    PREFETCH(&Bin1[r], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin2[i], _MM_HINT_T0)
        PREFETCH(&Bin1[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

    /* X <-- B_{r1 - 1} */
    XOR4_2(Bin1[r].q, Bin2[r].q)

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        XOR4(Bin1[i].q, Bin2[i].q)
        /* X <-- H'(X \xor B_i) */
        XOR4_Y
        PWXFORM
        /* B'_i <-- X */
        XOUT(Bout[i].q)
    }

    /* Last iteration of the loop above */
    XOR4(Bin1[i].q, Bin2[i].q)
    XOR4_Y
    PWXFORM

    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)

    return _mm_cvtsi128_si32(X0);
}

#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
    return B[2 * r - 1].w[0];
}
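/*
 * Note: because the (i * 5 % 16) shuffle used by smix1() and smix2() below
 * maps index 0 to itself, w[0] above is also the first 32-bit word of
 * B_{2r-1} in the unshuffled byte order.
 */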

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 128r bytes in length. The value N must be even and no
 * smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
 * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
 * bytes as well saves cache lines, but might result in cache bank conflicts).
 */
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
    const salsa20_blk_t * VROM = shared->shared1.aligned;
    uint32_t VROM_mask = shared->mask1;
    size_t s = 2 * r;
    salsa20_blk_t * X = V, * Y;
    uint32_t i, j;
    size_t k;

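    /*
     * The (i * 5 % 16) index below permutes the 16 words of each 64-byte
     * sub-block so that each __m128i (q[0] ... q[3]) holds one Salsa20
     * diagonal; the same indexing on the store at the end of this function
     * converts back to the external word order.
     */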
    /* 1: X <-- B */
    /* 3: V_i <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    if (NROM && (VROM_mask & 1)) {
        uint32_t n;
        salsa20_blk_t * V_n;
        const salsa20_blk_t * V_j;

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[s];
        blockmix(X, Y, r, S);

        X = &V[2 * s];
        if ((1 & VROM_mask) == 1) {
            /* j <-- Integerify(X) mod NROM */
            j = integerify(Y, r) & (NROM - 1);
            V_j = &VROM[j * s];

            /* X <-- H(X \xor VROM_j) */
            j = blockmix_xor(Y, V_j, X, r, 1, S);
        } else {
            /* X <-- H(X) */
            blockmix(Y, X, r, S);
            j = integerify(X, r);
        }

        for (n = 2; n < N; n <<= 1) {
            uint32_t m = (n < N / 2) ? n : (N - 1 - n);

            V_n = &V[n * s];

            /* 2: for i = 0 to N - 1 do */
            for (i = 1; i < m; i += 2) {
                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i - 1;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                Y = &V_n[i * s];
                j = blockmix_xor(X, V_j, Y, r, 0, S);

                if (((n + i) & VROM_mask) == 1) {
                    /* j <-- Integerify(X) mod NROM */
                    j &= NROM - 1;
                    V_j = &VROM[j * s];
                } else {
                    /* j <-- Wrap(Integerify(X), i) */
                    j &= n - 1;
                    j += i;
                    V_j = &V[j * s];
                }

                /* X <-- H(X \xor VROM_j) */
                X = &V_n[(i + 1) * s];
                j = blockmix_xor(Y, V_j, X, r, 1, S);
            }
        }

        n >>= 1;

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 2 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[(N - 1) * s];
        j = blockmix_xor(X, V_j, Y, r, 0, S);

        if (((N - 1) & VROM_mask) == 1) {
            /* j <-- Integerify(X) mod NROM */
            j &= NROM - 1;
            V_j = &VROM[j * s];
        } else {
            /* j <-- Wrap(Integerify(X), i) */
            j &= n - 1;
            j += N - 1 - n;
            V_j = &V[j * s];
        }

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        X = XY;
        blockmix_xor(Y, V_j, X, r, 1, S);
    } else if (flags & YESCRYPT_RW) {
        uint32_t n;
        salsa20_blk_t * V_n, * V_j;

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[s];
        blockmix(X, Y, r, S);

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        X = &V[2 * s];
        blockmix(Y, X, r, S);
        j = integerify(X, r);

        for (n = 2; n < N; n <<= 1) {
            uint32_t m = (n < N / 2) ? n : (N - 1 - n);

            V_n = &V[n * s];

            /* 2: for i = 0 to N - 1 do */
            for (i = 1; i < m; i += 2) {
                Y = &V_n[i * s];

                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i - 1;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                j = blockmix_xor(X, V_j, Y, r, 0, S);

                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                X = &V_n[(i + 1) * s];
                j = blockmix_xor(Y, V_j, X, r, 0, S);
            }
        }

        n >>= 1;

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 2 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[(N - 1) * s];
        j = blockmix_xor(X, V_j, Y, r, 0, S);

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 1 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        X = XY;
        blockmix_xor(Y, V_j, X, r, 0, S);
    } else {
        /* 2: for i = 0 to N - 1 do */
        for (i = 1; i < N - 1; i += 2) {
            /* 4: X <-- H(X) */
            /* 3: V_i <-- X */
            Y = &V[i * s];
            blockmix(X, Y, r, S);

            /* 4: X <-- H(X) */
            /* 3: V_i <-- X */
            X = &V[(i + 1) * s];
            blockmix(Y, X, r, S);
        }

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[i * s];
        blockmix(X, Y, r, S);

        /* 4: X <-- H(X) */
        X = XY;
        blockmix(Y, X, r, S);
    }

    /* B' <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
        }
    }
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r bytes in length. The value N must be a power of 2
 * greater than 1. The value Nloop must be even. The array V must be aligned
 * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
 * bytes (aligning them to 64 bytes as well saves cache lines, but might result
 * in cache bank conflicts).
 */
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
    const salsa20_blk_t * VROM = shared->shared1.aligned;
    uint32_t VROM_mask = shared->mask1;
    size_t s = 2 * r;
    salsa20_blk_t * X = XY, * Y = &XY[s];
    uint64_t i;
    uint32_t j;
    size_t k;

    if (Nloop == 0)
        return;

    /* X <-- B' */
    /* 3: V_i <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    i = Nloop / 2;

    /* 7: j <-- Integerify(X) mod N */
    j = integerify(X, r) & (N - 1);

    /*
     * Normally, NROM implies YESCRYPT_RW, but we check for these separately
     * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
     * operating on the entire V.
     */
    if (NROM && (flags & YESCRYPT_RW)) {
        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < Nloop; i += 2) {
            salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* j <-- Integerify(X) mod NROM */
            j = blockmix_xor_save(X, V_j, Y, r, S);

            if (((i + 1) & VROM_mask) == 1) {
                const salsa20_blk_t * VROM_j;

                j &= NROM - 1;
                VROM_j = &VROM[j * s];

                /* X <-- H(X \xor VROM_j) */
                /* 7: j <-- Integerify(X) mod N */
                j = blockmix_xor(Y, VROM_j, X, r, 1, S);
            } else {
                j &= N - 1;
                V_j = &V[j * s];

                /* 8: X <-- H(X \xor V_j) */
                /* V_j <-- Xprev \xor V_j */
                /* j <-- Integerify(X) mod NROM */
                j = blockmix_xor_save(Y, V_j, X, r, S);
            }
            j &= N - 1;
            V_j = &V[j * s];
        }
    } else if (NROM) {
        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < Nloop; i += 2) {
            const salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* j <-- Integerify(X) mod NROM */
            j = blockmix_xor(X, V_j, Y, r, 0, S);

            if (((i + 1) & VROM_mask) == 1) {
                j &= NROM - 1;
                V_j = &VROM[j * s];
            } else {
                j &= N - 1;
                V_j = &V[j * s];
            }

            /* X <-- H(X \xor VROM_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(Y, V_j, X, r, 1, S);
            j &= N - 1;
            V_j = &V[j * s];
        }
    } else if (flags & YESCRYPT_RW) {
        /* 6: for i = 0 to N - 1 do */
        do {
            salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor_save(X, V_j, Y, r, S);
            j &= N - 1;
            V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor_save(Y, V_j, X, r, S);
            j &= N - 1;
        } while (--i);
    } else {
        /* 6: for i = 0 to N - 1 do */
        do {
            const salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(X, V_j, Y, r, 0, S);
            j &= N - 1;
            V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(Y, V_j, X, r, 0, S);
            j &= N - 1;
        } while (--i);
    }

    /* 10: B' <-- X */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
        }
    }
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 */
static uint64_t
p2floor(uint64_t x)
{
    uint64_t y;
    while ((y = x & (x - 1)))
        x = y;
    return x;
}
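/*
 * Example: p2floor(1) = 1, p2floor(7) = 4, p2floor(8) = 8, p2floor(1000) = 512;
 * clearing the lowest set bit repeatedly leaves only the highest set bit.
 */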

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage XY
 * must be 256r or 256rp bytes in length (the larger size is required with
 * OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
 * The array V must be aligned to a multiple of 64 bytes, and arrays B and
 * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
 * saves cache lines and helps avoid false sharing in OpenMP-enabled builds
 * when p > 1, but it might also result in cache bank conflicts).
 */
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
    size_t s = 2 * r;
    uint32_t Nchunk = N / p;
    uint64_t Nloop_all, Nloop_rw;
    uint32_t i;

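    /*
     * Length of the second loop as a function of t, in units of
     * Nchunk = N / p (fractions rounded up, then rounded to an even
     * count below):
     *   with YESCRYPT_RW:    t = 0 -> 1/3,  t = 1 -> 2/3,  t >= 2 -> t - 1
     *   without YESCRYPT_RW: t = 0 -> 1,    t = 1 -> 1.5,  t >= 2 -> t
     */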
    Nloop_all = Nchunk;
    if (flags & YESCRYPT_RW) {
        if (t <= 1) {
            if (t)
                Nloop_all *= 2; /* 2/3 */
            Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
        } else {
            Nloop_all *= t - 1;
        }
    } else if (t) {
        if (t == 1)
            Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
        Nloop_all *= t;
    }

    Nloop_rw = 0;
    if (flags & __YESCRYPT_INIT_SHARED)
        Nloop_rw = Nloop_all;
    else if (flags & YESCRYPT_RW)
        Nloop_rw = Nloop_all / p;

    Nchunk &= ~(uint32_t)1; /* round down to even */
    Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
    Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
    {
#pragma omp for
#endif
    for (i = 0; i < p; i++) {
        uint32_t Vchunk = i * Nchunk;
        uint8_t * Bp = &B[128 * r * i];
        salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
        salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
        salsa20_blk_t * XYp = XY;
#endif
        uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
        void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
        if (Sp)
            smix1(Bp, 1, S_SIZE_ALL / 128,
                flags & ~YESCRYPT_PWXFORM,
                Sp, NROM, shared, XYp, NULL);
        if (!(flags & __YESCRYPT_INIT_SHARED_2))
            smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
        smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
            NROM, shared, XYp, Sp);
    }

    if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
        for (i = 0; i < p; i++) {
            uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
            salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
            salsa20_blk_t * XYp = XY;
#endif
            void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
            smix2(Bp, r, N, Nloop_all - Nloop_rw,
                flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
        }
    }
#ifdef _OPENMP
    }
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf. The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
 * of 2 greater than 1. (This optimized implementation currently additionally
 * limits N to the range from 8 to 2^31, but other implementations might not.)
 *
 * t controls computation time while not affecting peak memory usage. shared
 * and flags may request special modes as described in yescrypt.h. local is
 * the thread-local data structure, allowing a memory allocation to be
 * preserved and reused across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error.
 */
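/*
 * Minimal usage sketch (illustrative only, not taken from this repository's
 * callers; yescrypt_init_local() and yescrypt_free_local() are assumed to be
 * the helpers declared in yescrypt.h, and a zero-filled yescrypt_shared_t is
 * used to mean "no ROM"):
 *
 *     yescrypt_shared_t shared;
 *     yescrypt_local_t local;
 *     uint8_t dk[32];
 *
 *     memset(&shared, 0, sizeof(shared));
 *     if (yescrypt_init_local(&local))
 *         return -1;
 *     if (yescrypt_kdf(&shared, &local,
 *         (const uint8_t *)"pleaseletmein", 13,
 *         (const uint8_t *)"SodiumChloride", 14,
 *         2048, 8, 1, 0, YESCRYPT_RW | YESCRYPT_PWXFORM,
 *         dk, sizeof(dk)))
 *         return -1;
 *     yescrypt_free_local(&local);
 */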
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
    uint8_t _ALIGN(128) sha256[32];
    yescrypt_region_t tmp;
    uint64_t NROM;
    size_t B_size, V_size, XY_size, need;
    uint8_t * B, * S;
    salsa20_blk_t * V, * XY;

    /*
     * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
     * so don't let it have side-effects. Without this adjustment, it'd
     * enable the SHA-256 password pre-hashing and output post-hashing,
     * because any deviation from classic scrypt implies those.
     */
    if (p == 1)
        flags &= ~YESCRYPT_PARALLEL_SMIX;

    /* Sanity-check parameters */
    if (flags & ~YESCRYPT_KNOWN_FLAGS) {
        errno = EINVAL;
        return -1;
    }
#if SIZE_MAX > UINT32_MAX
    if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
        errno = EFBIG;
        return -1;
    }
#endif
    if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
        errno = EFBIG;
        return -1;
    }
    if (N > UINT32_MAX) {
        errno = EFBIG;
        return -1;
    }
    if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
        errno = EINVAL;
        return -1;
    }
    if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
        errno = EINVAL;
        return -1;
    }
    if ((r > SIZE_MAX / 256 / p) ||
        (N > SIZE_MAX / 128 / r)) {
        errno = ENOMEM;
        return -1;
    }
#ifdef _OPENMP
    if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
        (N > SIZE_MAX / 128 / (r * p))) {
        errno = ENOMEM;
        return -1;
    }
#endif
    if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
        (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
        p > SIZE_MAX / S_SIZE_ALL) {
        errno = ENOMEM;
        return -1;
    }

    NROM = 0;
    if (shared->shared1.aligned) {
        NROM = shared->shared1.aligned_size / ((size_t)128 * r);
        if (NROM > UINT32_MAX) {
            errno = EFBIG;
            return -1;
        }
        if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
            !(flags & YESCRYPT_RW)) {
            errno = EINVAL;
            return -1;
        }
    }

    /* Allocate memory */
    V = NULL;
    V_size = (size_t)128 * r * N;
#ifdef _OPENMP
    if (!(flags & YESCRYPT_PARALLEL_SMIX))
        V_size *= p;
#endif
    need = V_size;
    if (flags & __YESCRYPT_INIT_SHARED) {
        if (local->aligned_size < need) {
            if (local->base || local->aligned ||
                local->base_size || local->aligned_size) {
                errno = EINVAL;
                return -1;
            }
            if (!alloc_region(local, need))
                return -1;
        }
        V = (salsa20_blk_t *)local->aligned;
        need = 0;
    }
    B_size = (size_t)128 * r * p;
    need += B_size;
    if (need < B_size) {
        errno = ENOMEM;
        return -1;
    }
    XY_size = (size_t)256 * r;
#ifdef _OPENMP
    XY_size *= p;
#endif
    need += XY_size;
    if (need < XY_size) {
        errno = ENOMEM;
        return -1;
    }
    if (flags & YESCRYPT_PWXFORM) {
        size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
        S_size *= p;
#else
        if (flags & YESCRYPT_PARALLEL_SMIX)
            S_size *= p;
#endif
        need += S_size;
        if (need < S_size) {
            errno = ENOMEM;
            return -1;
        }
    }
    if (flags & __YESCRYPT_INIT_SHARED) {
        if (!alloc_region(&tmp, need))
            return -1;
        B = (uint8_t *)tmp.aligned;
        XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
    } else {
        init_region(&tmp);
        if (local->aligned_size < need) {
            if (free_region(local))
                return -1;
            if (!alloc_region(local, need))
                return -1;
        }
        B = (uint8_t *)local->aligned;
        V = (salsa20_blk_t *)((uint8_t *)B + B_size);
        XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
    }
    S = NULL;
    if (flags & YESCRYPT_PWXFORM)
        S = (uint8_t *)XY + XY_size;

    if (t || flags) {
        SHA256_CTX_Y ctx;
        SHA256_Init_Y(&ctx);
        SHA256_Update_Y(&ctx, passwd, passwdlen);
        SHA256_Final_Y(sha256, &ctx);
        passwd = sha256;
        passwdlen = sizeof(sha256);
    }

    /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
    PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

    if (t || flags)
        memcpy(sha256, B, sizeof(sha256));

    if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
        smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
    } else {
        uint32_t i;

        /* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
        for (i = 0; i < p; i++) {
            /* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
            smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
                &V[(size_t)2 * r * i * N],
                NROM, shared,
                &XY[(size_t)4 * r * i],
                S ? &S[S_SIZE_ALL * i] : S);
#else
            smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
                NROM, shared, XY, S);
#endif
        }
    }

    /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
    PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

    /*
     * Except when computing classic scrypt, allow all computation so far
     * to be performed on the client. The final steps below match those of
     * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
     * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
     * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
     */
    if ((t || flags) && buflen == sizeof(sha256)) {
        /* Compute ClientKey */
        {
            HMAC_SHA256_CTX_Y ctx;
            HMAC_SHA256_Init_Y(&ctx, buf, buflen);
            if (yescrypt_client_key != NULL)
                HMAC_SHA256_Update_Y(&ctx, yescrypt_client_key,
                    yescrypt_client_key_len);
            else
                /* GlobalBoost-Y buggy yescrypt */
                HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
            HMAC_SHA256_Final_Y(sha256, &ctx);
        }
        /* Compute StoredKey */
        {
            SHA256_CTX_Y ctx;
            SHA256_Init_Y(&ctx);
            SHA256_Update_Y(&ctx, sha256, sizeof(sha256));
            SHA256_Final_Y(buf, &ctx);
        }
    }

    if (free_region(&tmp))
        return -1;

    /* Success! */
    return 0;
}