GitHub Repository: tpruvot/cpuminer-multi
Path: blob/linux/algo/pluck.c
/*
 * Copyright 2009 Colin Percival, 2011 ArtForz, 2011-2014 pooler, 2015 Jordan Earls
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "cpuminer-config.h"
#include "miner.h"

#include <stdlib.h>
#include <string.h>

#define BLOCK_HEADER_SIZE 80
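// 80 bytes is the standard bitcoin-style block header size hashed by pluck_hash() below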

// Windows fallback: htobe32 may be missing, so emulate it with htonl
#ifndef htobe32
#define htobe32(x) ((uint32_t)htonl((uint32_t)(x)))
#endif

// 32-bit rotates; arguments are fully parenthesized so expression operands expand safely
#ifdef _MSC_VER
#define ROTL(a, b) _rotl(a,b)
#define ROTR(a, b) _rotr(a,b)
#else
#define ROTL(a, b) (((a) << (b)) | ((a) >> (32 - (b))))
#define ROTR(a, b) (((a) >> (b)) | ((a) << (32 - (b))))
#endif

#if defined(_MSC_VER) && defined(_M_X64)
#define _VECTOR __vectorcall
#include <intrin.h>
//#include <emmintrin.h> //SSE2
//#include <pmmintrin.h> //SSE3
//#include <tmmintrin.h> //SSSE3
//#include <smmintrin.h> //SSE4.1
//#include <nmmintrin.h> //SSE4.2
//#include <ammintrin.h> //SSE4A
//#include <wmmintrin.h> //AES
//#include <immintrin.h> //AVX
#define OPT_COMPATIBLE
#elif defined(__GNUC__) && defined(__x86_64__)
#include <x86intrin.h>
#define _VECTOR
#endif
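
/*
 * xor_salsa8: the Salsa20/8 core familiar from scrypt. B ^= Bx, then
 * B += Salsa20/8(B). The extra parameter i is the current offset in the
 * pluck hash buffer: for i <= 128, B has never been written, so the input
 * is copied straight in instead of XORed (a ^ 0 == a).
 */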

#ifdef OPT_COMPATIBLE
static void _VECTOR xor_salsa8(__m128i B[4], const __m128i Bx[4], int i)
{
	__m128i X0, X1, X2, X3;

	if (i <= 128) {
		// a xor 0 = a
		X0 = B[0] = Bx[0];
		X1 = B[1] = Bx[1];
		X2 = B[2] = Bx[2];
		X3 = B[3] = Bx[3];
	} else {
		X0 = B[0] = _mm_xor_si128(B[0], Bx[0]);
		X1 = B[1] = _mm_xor_si128(B[1], Bx[1]);
		X2 = B[2] = _mm_xor_si128(B[2], Bx[2]);
		X3 = B[3] = _mm_xor_si128(B[3], Bx[3]);
	}

	for (i = 0; i < 4; i++) {
		/* Operate on columns. */
		X1.m128i_u32[0] ^= ROTL(X0.m128i_u32[0] + X3.m128i_u32[0], 7);
		X2.m128i_u32[1] ^= ROTL(X1.m128i_u32[1] + X0.m128i_u32[1], 7);
		X3.m128i_u32[2] ^= ROTL(X2.m128i_u32[2] + X1.m128i_u32[2], 7);
		X0.m128i_u32[3] ^= ROTL(X3.m128i_u32[3] + X2.m128i_u32[3], 7);

		X2.m128i_u32[0] ^= ROTL(X1.m128i_u32[0] + X0.m128i_u32[0], 9);
		X3.m128i_u32[1] ^= ROTL(X2.m128i_u32[1] + X1.m128i_u32[1], 9);
		X0.m128i_u32[2] ^= ROTL(X3.m128i_u32[2] + X2.m128i_u32[2], 9);
		X1.m128i_u32[3] ^= ROTL(X0.m128i_u32[3] + X3.m128i_u32[3], 9);

		X3.m128i_u32[0] ^= ROTL(X2.m128i_u32[0] + X1.m128i_u32[0], 13);
		X0.m128i_u32[1] ^= ROTL(X3.m128i_u32[1] + X2.m128i_u32[1], 13);
		X1.m128i_u32[2] ^= ROTL(X0.m128i_u32[2] + X3.m128i_u32[2], 13);
		X2.m128i_u32[3] ^= ROTL(X1.m128i_u32[3] + X0.m128i_u32[3], 13);

		X0.m128i_u32[0] ^= ROTL(X3.m128i_u32[0] + X2.m128i_u32[0], 18);
		X1.m128i_u32[1] ^= ROTL(X0.m128i_u32[1] + X3.m128i_u32[1], 18);
		X2.m128i_u32[2] ^= ROTL(X1.m128i_u32[2] + X0.m128i_u32[2], 18);
		X3.m128i_u32[3] ^= ROTL(X2.m128i_u32[3] + X1.m128i_u32[3], 18);

		/* Operate on rows. */
		X0.m128i_u32[1] ^= ROTL(X0.m128i_u32[0] + X0.m128i_u32[3], 7); X1.m128i_u32[2] ^= ROTL(X1.m128i_u32[1] + X1.m128i_u32[0], 7);
		X2.m128i_u32[3] ^= ROTL(X2.m128i_u32[2] + X2.m128i_u32[1], 7); X3.m128i_u32[0] ^= ROTL(X3.m128i_u32[3] + X3.m128i_u32[2], 7);
		X0.m128i_u32[2] ^= ROTL(X0.m128i_u32[1] + X0.m128i_u32[0], 9); X1.m128i_u32[3] ^= ROTL(X1.m128i_u32[2] + X1.m128i_u32[1], 9);
		X2.m128i_u32[0] ^= ROTL(X2.m128i_u32[3] + X2.m128i_u32[2], 9); X3.m128i_u32[1] ^= ROTL(X3.m128i_u32[0] + X3.m128i_u32[3], 9);

		X0.m128i_u32[3] ^= ROTL(X0.m128i_u32[2] + X0.m128i_u32[1], 13); X1.m128i_u32[0] ^= ROTL(X1.m128i_u32[3] + X1.m128i_u32[2], 13);
		X2.m128i_u32[1] ^= ROTL(X2.m128i_u32[0] + X2.m128i_u32[3], 13); X3.m128i_u32[2] ^= ROTL(X3.m128i_u32[1] + X3.m128i_u32[0], 13);
		X0.m128i_u32[0] ^= ROTL(X0.m128i_u32[3] + X0.m128i_u32[2], 18); X1.m128i_u32[1] ^= ROTL(X1.m128i_u32[0] + X1.m128i_u32[3], 18);
		X2.m128i_u32[2] ^= ROTL(X2.m128i_u32[1] + X2.m128i_u32[0], 18); X3.m128i_u32[3] ^= ROTL(X3.m128i_u32[2] + X3.m128i_u32[1], 18);
	}

	B[0] = _mm_add_epi32(B[0], X0);
	B[1] = _mm_add_epi32(B[1], X1);
	B[2] = _mm_add_epi32(B[2], X2);
	B[3] = _mm_add_epi32(B[3], X3);
}

#else

static inline void xor_salsa8(uint32_t B[16], const uint32_t Bx[16], int i)
{
	uint32_t x00,x01,x02,x03,x04,x05,x06,x07,x08,x09,x10,x11,x12,x13,x14,x15;

	if (i <= 128) {
		// a xor 0 = a
		x00 = B[ 0] = Bx[ 0]; x01 = B[ 1] = Bx[ 1]; x02 = B[ 2] = Bx[ 2]; x03 = B[ 3] = Bx[ 3];
		x04 = B[ 4] = Bx[ 4]; x05 = B[ 5] = Bx[ 5]; x06 = B[ 6] = Bx[ 6]; x07 = B[ 7] = Bx[ 7];
		x08 = B[ 8] = Bx[ 8]; x09 = B[ 9] = Bx[ 9]; x10 = B[10] = Bx[10]; x11 = B[11] = Bx[11];
		x12 = B[12] = Bx[12]; x13 = B[13] = Bx[13]; x14 = B[14] = Bx[14]; x15 = B[15] = Bx[15];
	} else {
		x00 = (B[ 0] ^= Bx[ 0]);
		x01 = (B[ 1] ^= Bx[ 1]);
		x02 = (B[ 2] ^= Bx[ 2]);
		x03 = (B[ 3] ^= Bx[ 3]);
		x04 = (B[ 4] ^= Bx[ 4]);
		x05 = (B[ 5] ^= Bx[ 5]);
		x06 = (B[ 6] ^= Bx[ 6]);
		x07 = (B[ 7] ^= Bx[ 7]);
		x08 = (B[ 8] ^= Bx[ 8]);
		x09 = (B[ 9] ^= Bx[ 9]);
		x10 = (B[10] ^= Bx[10]);
		x11 = (B[11] ^= Bx[11]);
		x12 = (B[12] ^= Bx[12]);
		x13 = (B[13] ^= Bx[13]);
		x14 = (B[14] ^= Bx[14]);
		x15 = (B[15] ^= Bx[15]);
	}

	for (i = 0; i < 8; i += 2) {
		/* Operate on columns. */
		x04 ^= ROTL(x00 + x12, 7); x09 ^= ROTL(x05 + x01, 7);
		x14 ^= ROTL(x10 + x06, 7); x03 ^= ROTL(x15 + x11, 7);

		x08 ^= ROTL(x04 + x00, 9); x13 ^= ROTL(x09 + x05, 9);
		x02 ^= ROTL(x14 + x10, 9); x07 ^= ROTL(x03 + x15, 9);

		x12 ^= ROTL(x08 + x04, 13); x01 ^= ROTL(x13 + x09, 13);
		x06 ^= ROTL(x02 + x14, 13); x11 ^= ROTL(x07 + x03, 13);

		x00 ^= ROTL(x12 + x08, 18); x05 ^= ROTL(x01 + x13, 18);
		x10 ^= ROTL(x06 + x02, 18); x15 ^= ROTL(x11 + x07, 18);

		/* Operate on rows. */
		x01 ^= ROTL(x00 + x03, 7); x06 ^= ROTL(x05 + x04, 7);
		x11 ^= ROTL(x10 + x09, 7); x12 ^= ROTL(x15 + x14, 7);

		x02 ^= ROTL(x01 + x00, 9); x07 ^= ROTL(x06 + x05, 9);
		x08 ^= ROTL(x11 + x10, 9); x13 ^= ROTL(x12 + x15, 9);

		x03 ^= ROTL(x02 + x01, 13); x04 ^= ROTL(x07 + x06, 13);
		x09 ^= ROTL(x08 + x11, 13); x14 ^= ROTL(x13 + x12, 13);

		x00 ^= ROTL(x03 + x02, 18); x05 ^= ROTL(x04 + x07, 18);
		x10 ^= ROTL(x09 + x08, 18); x15 ^= ROTL(x14 + x13, 18);
	}
	B[ 0] += x00;
	B[ 1] += x01;
	B[ 2] += x02;
	B[ 3] += x03;
	B[ 4] += x04;
	B[ 5] += x05;
	B[ 6] += x06;
	B[ 7] += x07;
	B[ 8] += x08;
	B[ 9] += x09;
	B[10] += x10;
	B[11] += x11;
	B[12] += x12;
	B[13] += x13;
	B[14] += x14;
	B[15] += x15;
}

#endif
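/*
 * SHA-256 round constants (FIPS 180-4): the first 32 bits of the fractional
 * parts of the cube roots of the first 64 primes.
 */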
static const uint32_t sha256_k[64] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

/* Elementary functions used by SHA256 */
#define Ch(x, y, z) (((x) & ((y) ^ (z))) ^ (z))
#define Maj(x, y, z) (((x) & ((y) | (z))) | ((y) & (z)))
#define S0(x) (ROTR((x), 2) ^ ROTR((x), 13) ^ ROTR((x), 22))
#define S1(x) (ROTR((x), 6) ^ ROTR((x), 11) ^ ROTR((x), 25))
#define s0(x) (ROTR((x), 7) ^ ROTR((x), 18) ^ ((x) >> 3))
#define s1(x) (ROTR((x), 17) ^ ROTR((x), 19) ^ ((x) >> 10))
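
// Note: Ch is the reduced form ((x & (y ^ z)) ^ z), equivalent to the
// textbook ((x & y) ^ (~x & z)) but one operation cheaper.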

/* SHA256 round function */
#define RND(a, b, c, d, e, f, g, h, k) \
	do { \
		t0 = h + S1(e) + Ch(e, f, g) + k; \
		t1 = S0(a) + Maj(a, b, c); \
		d += t0; \
		h = t0 + t1; \
	} while (0)

/* Adjusted round function for rotating state */
#define RNDr(S, W, i) \
	RND(S[(64 - i) % 8], S[(65 - i) % 8], \
	    S[(66 - i) % 8], S[(67 - i) % 8], \
	    S[(68 - i) % 8], S[(69 - i) % 8], \
	    S[(70 - i) % 8], S[(71 - i) % 8], \
	    W[i] + sha256_k[i])
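
/*
 * RNDr rotates the roles instead of the data: at i = 0 the working variables
 * (a..h) map to S[0]..S[7]; at i = 1 they map to S[7], S[0], S[1], ..., S[6];
 * and so on. Only the indices move, never the state words, and after 64
 * rounds the mapping is back where it started.
 */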

static void sha256_transform_volatile(uint32_t *state, uint32_t *block)
{
	uint32_t *W = block; // note: block must point to a mutable array of 64 uint32_t
	uint32_t S[8];
	uint32_t t0, t1;
	int i;

	/* 1. Extend the first 16 words into the remaining 48 words of the message schedule. */
	for (i = 16; i < 64; i += 2) {
		W[i]   = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
		W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15];
	}

	/* 2. Initialize working variables. */
	memcpy(S, state, 32);

	/* 3. Mix. */
	RNDr(S, W, 0);
	RNDr(S, W, 1);
	RNDr(S, W, 2);
	RNDr(S, W, 3);
	RNDr(S, W, 4);
	RNDr(S, W, 5);
	RNDr(S, W, 6);
	RNDr(S, W, 7);
	RNDr(S, W, 8);
	RNDr(S, W, 9);
	RNDr(S, W, 10);
	RNDr(S, W, 11);
	RNDr(S, W, 12);
	RNDr(S, W, 13);
	RNDr(S, W, 14);
	RNDr(S, W, 15);
	RNDr(S, W, 16);
	RNDr(S, W, 17);
	RNDr(S, W, 18);
	RNDr(S, W, 19);
	RNDr(S, W, 20);
	RNDr(S, W, 21);
	RNDr(S, W, 22);
	RNDr(S, W, 23);
	RNDr(S, W, 24);
	RNDr(S, W, 25);
	RNDr(S, W, 26);
	RNDr(S, W, 27);
	RNDr(S, W, 28);
	RNDr(S, W, 29);
	RNDr(S, W, 30);
	RNDr(S, W, 31);
	RNDr(S, W, 32);
	RNDr(S, W, 33);
	RNDr(S, W, 34);
	RNDr(S, W, 35);
	RNDr(S, W, 36);
	RNDr(S, W, 37);
	RNDr(S, W, 38);
	RNDr(S, W, 39);
	RNDr(S, W, 40);
	RNDr(S, W, 41);
	RNDr(S, W, 42);
	RNDr(S, W, 43);
	RNDr(S, W, 44);
	RNDr(S, W, 45);
	RNDr(S, W, 46);
	RNDr(S, W, 47);
	RNDr(S, W, 48);
	RNDr(S, W, 49);
	RNDr(S, W, 50);
	RNDr(S, W, 51);
	RNDr(S, W, 52);
	RNDr(S, W, 53);
	RNDr(S, W, 54);
	RNDr(S, W, 55);
	RNDr(S, W, 56);
	RNDr(S, W, 57);
	RNDr(S, W, 58);
	RNDr(S, W, 59);
	RNDr(S, W, 60);
	RNDr(S, W, 61);
	RNDr(S, W, 62);
	RNDr(S, W, 63);

	/* 4. Mix local working variables into global state */
	for (i = 0; i < 8; i++)
		state[i] += S[i];
}

// standard sha256 hash
#if 1
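/*
 * The loop below implements SHA-256 padding inline: the data is consumed in
 * 64-byte blocks, r counts the bytes still to hash, and the `r > -9` bound
 * allows one extra iteration so the 0x80 pad byte and the length word always
 * fit (a tail of 56 bytes or more pushes them into an extra block). The bit
 * length is stored in T[15] only, which is ample for the short inputs here.
 */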
static void sha256_hash(unsigned char *hash, const unsigned char *data, int len)
{
	uint32_t _ALIGN(64) S[16];
	uint32_t _ALIGN(64) T[64];
	int i, r;

	sha256_init(S);
	for (r = len; r > -9; r -= 64) {
		if (r < 64)
			memset(T, 0, 64);
		memcpy(T, data + len - r, r > 64 ? 64 : (r < 0 ? 0 : r));
		if (r >= 0 && r < 64)
			((unsigned char *)T)[r] = 0x80; // pad byte right after the data
		for (i = 0; i < 16; i++)
			T[i] = be32dec(T + i);
		if (r < 56)
			T[15] = 8 * len; // bit length goes in the final word
		//sha256_transform(S, T, 0);
		sha256_transform_volatile(S, T);
	}
	for (i = 0; i < 8; i++)
		be32enc((uint32_t *)hash + i, S[i]);
}
#else
#include <openssl/sha.h>
static void sha256_hash(unsigned char *hash, const unsigned char *data, int len)
{
	SHA256_CTX ctx;
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, data, len);
	SHA256_Final(hash, &ctx);
}
#endif

// hash exactly 64 bytes (i.e., one sha256 block)
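/*
 * Because the input length is fixed, the second (padding) compression block
 * is precomputed: E holds only the 0x80 pad bit and the 512-bit (8 * 64)
 * message length. sha256_transform_volatile() takes already-decoded words,
 * so the bytes are laid out for a little-endian host: E[3] = 0x80 makes
 * word 0 equal 0x80000000, and E[61] = 0x02 makes word 15 equal 0x200.
 */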
static void sha256_hash512(uint32_t *hash, const uint32_t *data)
{
	uint32_t _ALIGN(64) S[16];
	uint32_t _ALIGN(64) T[64];
	uchar _ALIGN(64) E[64*4] = { 0 };
	int i;

	sha256_init(S);

	for (i = 0; i < 16; i++)
		T[i] = be32dec(&data[i]);
	sha256_transform_volatile(S, T);

	E[3] = 0x80;
	E[61] = 0x02; // T[15] = 8 * 64 => 0x200
	sha256_transform_volatile(S, (uint32_t*)E);

	for (i = 0; i < 8; i++)
		be32enc(&hash[i], S[i]);
}
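/*
 * Pluck's memory-hard loop. hashbuffer must hold at least N KiB (the caller
 * passes a scratch buffer of N * 1024 bytes). The first 64 bytes are seeded
 * from sha256(header); each 32-byte step then derives pseudo-random indexes
 * with the Salsa20/8 core, gathers earlier buffer contents, hashes them, and
 * scatters words back, so every step depends on unpredictable prior state.
 */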
void pluck_hash(uint32_t *hash, const uint32_t *data, uchar *hashbuffer, const int N)
{
	int size = N * 1024;
	sha256_hash(hashbuffer, (void*)data, BLOCK_HEADER_SIZE);
	memset(&hashbuffer[32], 0, 32);

	for(int i = 64; i < size - 32; i += 32)
	{
		uint32_t _ALIGN(64) randseed[16];
		uint32_t _ALIGN(64) randbuffer[16];
		uint32_t _ALIGN(64) joint[16];
		// i - 4: indexes are used for 32-bit reads, so the cap keeps the last
		// word from reaching 3 bytes past the area written so far; using size
		// instead would mostly select bytes that are still zero
		int randmax = i - 4;

		// set up randbuffer as an array of random indexes
		memcpy(randseed, &hashbuffer[i - 64], 64);

		if(i > 128) memcpy(randbuffer, &hashbuffer[i - 128], 64);
		//else memset(randbuffer, 0, 64);

		xor_salsa8((void*)randbuffer, (void*)randseed, i);
		memcpy(joint, &hashbuffer[i - 32], 32);

		// use the last hash value as the seed
		for (int j = 32; j < 64; j += 4)
		{
			// a fresh random index per word; modulo (randmax - 32) keeps the
			// 32-bit read inside memory that has already been written to
			uint32_t rand = randbuffer[(j - 32) >> 2] % (randmax - 32);
			joint[j >> 2] = *((uint32_t *)&hashbuffer[rand]);
		}

		sha256_hash512((uint32_t*) &hashbuffer[i], joint);

		// set up randbuffer again, this time seeded from the hash we just
		// wrote and the previous hash value (post-mixing)
		memcpy(randseed, &hashbuffer[i - 32], 64);

		if(i > 128) memcpy(randbuffer, &hashbuffer[i - 128], 64);
		//else memset(randbuffer, 0, 64);

		xor_salsa8((void*)randbuffer, (void*)randseed, i);

		// scatter 32-bit words from around the freshly hashed block back to
		// random earlier offsets in the buffer
		for (int j = 0; j < 32; j += 2)
		{
			uint32_t rand = randbuffer[j >> 1] % randmax;
			*((uint32_t *)(hashbuffer + rand)) = *((uint32_t *)(hashbuffer + j + randmax));
		}
	}

	memcpy(hash, hashbuffer, 32);
}
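/*
 * Standard cpuminer scan loop: byte-swap the header once, then try nonces
 * until the target is met, max_nonce is reached, or a restart is requested.
 * Note the nonce is written into endiandata[19] un-swapped, so on success
 * it is converted back with htobe32() before being stored in pdata[19].
 */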
int scanhash_pluck(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done,
	unsigned char *scratchbuf, int N)
{
	uint32_t _ALIGN(128) hash[8];
	uint32_t _ALIGN(128) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t first_nonce = pdata[19];
	volatile uint8_t *restart = &(work_restart[thr_id].restart);
	uint32_t n = first_nonce;

	if (opt_benchmark)
		ptarget[7] = 0xffff;

	for (int k = 0; k < 19; k++)
		be32enc(&endiandata[k], pdata[k]);

	const uint32_t Htarg = ptarget[7];
	do {
		//be32enc(&endiandata[19], n);
		endiandata[19] = n;
		pluck_hash(hash, endiandata, scratchbuf, N);

		if (hash[7] <= Htarg && fulltest(hash, ptarget))
		{
			work_set_target_ratio(work, hash);
			*hashes_done = n - first_nonce + 1;
			pdata[19] = htobe32(endiandata[19]);
			return 1;
		}
		n++;
	} while (n < max_nonce && !(*restart));

	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;
	return 0;
}