Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
tpruvot
GitHub Repository: tpruvot/cpuminer-multi
Path: blob/linux/algo/cryptolight.c
1201 views
1
// Copyright (c) 2012-2013 The Cryptonote developers
2
// Distributed under the MIT/X11 software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include "miner.h"
6
7
#if defined(__arm__) || defined(_MSC_VER)
8
#ifndef NOASM
9
#define NOASM
10
#endif
11
#endif
12
13
#include "crypto/oaes_lib.h"
14
#include "crypto/c_keccak.h"
15
#include "crypto/c_groestl.h"
16
#include "crypto/c_blake256.h"
17
#include "crypto/c_jh.h"
18
#include "crypto/c_skein.h"
19
#include "crypto/int-util.h"
20
#include "crypto/hash-ops.h"
21
22
#if USE_INT128
23
24
#if __GNUC__ == 4 && __GNUC_MINOR__ >= 4 && __GNUC_MINOR__ < 6
25
typedef unsigned int uint128_t __attribute__ ((__mode__ (TI)));
26
#elif defined (_MSC_VER)
27
/* only for mingw64 on windows */
28
#undef USE_INT128
29
#define USE_INT128 (0)
30
#else
31
typedef __uint128_t uint128_t;
32
#endif
33
34
#endif
35
36
#define LITE 1
37
#if LITE /* cryptonight-light */
38
#define MEMORY (1 << 20)
39
#define ITER (1 << 19)
40
#else
41
#define MEMORY (1 << 21) /* 2 MiB */
42
#define ITER (1 << 20)
43
#endif
44
45
#define AES_BLOCK_SIZE 16
46
#define AES_KEY_SIZE 32 /*16*/
47
#define INIT_SIZE_BLK 8
48
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
49
50
#pragma pack(push, 1)
/* Overlay of the 200-byte Keccak state: `hs` is the raw sponge state,
 * while the anonymous struct view exposes the first 64 bytes as AES key
 * material (`k`) and the next INIT_SIZE_BYTE bytes as the scratchpad
 * seed (`init`). Packed so the byte views line up exactly. */
union cn_slow_hash_state {
	union hash_state hs;
	struct {
		uint8_t k[64];              /* two 32-byte AES keys / xor inputs */
		uint8_t init[INIT_SIZE_BYTE]; /* seed for scratchpad expansion */
	};
};
#pragma pack(pop)
59
60
/* Blake-256 finalizer: hashes `len` bytes of `input` into 32-byte `output`. */
static void do_blake_hash(const void* input, size_t len, char* output) {
	blake256_hash((uint8_t*)output, input, len);
}
63
64
/* Groestl-256 finalizer; note groestl() takes the input length in BITS. */
static void do_groestl_hash(const void* input, size_t len, char* output) {
	groestl(input, len * 8, (uint8_t*)output);
}
67
68
/* JH-256 finalizer; bit-length API. The return code is only checked in
 * debug builds (assert compiles away under NDEBUG). */
static void do_jh_hash(const void* input, size_t len, char* output) {
	int r = jh_hash(HASH_SIZE * 8, input, 8 * len, (uint8_t*)output);
	assert(likely(SUCCESS == r));
}
72
73
/* Skein-256 finalizer; bit-length API. The return code is only checked in
 * debug builds (assert compiles away under NDEBUG). */
static void do_skein_hash(const void* input, size_t len, char* output) {
	int r = skein_hash(8 * HASH_SIZE, input, 8 * len, (uint8_t*)output);
	assert(likely(SKEIN_SUCCESS == r));
}
77
78
extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
79
extern int aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey);
80
#if !defined(_MSC_VER) && !defined(NOASM)
81
extern int fast_aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
82
extern int fast_aesb_pseudo_round_mut(uint8_t *val, uint8_t *expandedKey);
83
#else
84
#define fast_aesb_single_round aesb_single_round
85
#define fast_aesb_pseudo_round_mut aesb_pseudo_round_mut
86
#endif
87
88
#if defined(NOASM) || !defined(__x86_64__)
89
/* Portable 64x64 -> 128-bit multiply via schoolbook multiplication on
 * 32-bit halves (used when no x86-64 asm is available).
 * Returns the low 64 bits of multiplier * multiplicand and stores the
 * high 64 bits through product_hi. */
static uint64_t mul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi) {
	/* Split each operand x into halves: x = xh * 2^32 + xl, so
	 * x * y = xh*yh * 2^64 + (xh*yl + xl*yh) * 2^32 + xl*yl. */
	uint64_t xh = hi_dword(multiplier);
	uint64_t xl = lo_dword(multiplier);
	uint64_t yh = hi_dword(multiplicand);
	uint64_t yl = lo_dword(multiplicand);

	/* The four 32x32 -> 64-bit partial products. */
	uint64_t hh = xh * yh;
	uint64_t hl = xh * yl;
	uint64_t lh = xl * yh;
	uint64_t ll = xl * yl;

	/* Sum of the cross terms may wrap; detect the carry explicitly. */
	uint64_t cross = hl + lh;
	uint64_t cross_carry = (cross < hl) ? 1 : 0;

	/* Low word: ll plus the low half of the cross sum, again with carry. */
	uint64_t product_lo = ll + (cross << 32);
	uint64_t lo_carry = (product_lo < ll) ? 1 : 0;

	*product_hi = hh + (cross >> 32) + (cross_carry << 32) + lo_carry;
	assert(hh <= *product_hi); /* high word can only grow from hh */

	return product_lo;
}
114
#else
115
extern uint64_t mul128(uint64_t multiplier, uint64_t multiplicand, uint64_t* product_hi);
116
#endif
117
118
/* Final-hash dispatch table: after the last Keccak permutation, the low
 * 2 bits of state byte 0 select which finalizer produces the 32-byte
 * result (same scheme as classic cryptonight). */
static void (* const extra_hashes[4])(const void *, size_t, char *) = {
	do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash
};
121
122
123
/* Map the first 32 bits of an AES block to a 16-byte-aligned offset into
 * the scratchpad: mask keeps the value below MEMORY and clears the low
 * 4 bits (one AES block granularity). */
static inline size_t e2i(const uint8_t* a) {
	const uint32_t v = ((uint32_t *)a)[0];
#if LITE
	return v & 0xFFFF0;   /* 1 MiB scratchpad */
#else
	return v & 0x1FFFF0;  /* 2 MiB scratchpad */
#endif
}
130
131
/* One "MUL" half-round: 128-bit multiply of a[0] by dst[0], add the
 * previous block c, then swap/xor results into both c and dst.
 * With variant != 0 the low output word is xored with the v1 tweak. */
static inline void mul_sum_xor_dst(const uint8_t* a, uint8_t* c, uint8_t* dst, const int variant, const uint64_t tweak) {
	uint64_t* c64 = (uint64_t*) c;
	uint64_t* dst64 = (uint64_t*) dst;
	const uint64_t a0 = ((const uint64_t*) a)[0];

	uint64_t hi;
	uint64_t lo = mul128(a0, dst64[0], &hi);
	lo += c64[1];
	hi += c64[0];

	/* read dst before overwriting it */
	const uint64_t old0 = dst64[0];
	const uint64_t old1 = dst64[1];
	c64[0] = old0 ^ hi;
	c64[1] = old1 ^ lo;
	dst64[0] = hi;
	dst64[1] = variant ? (lo ^ tweak) : lo;
}
140
141
/* In-place xor of one 16-byte AES block: a ^= b. */
static inline void xor_blocks(uint8_t* a, const uint8_t* b) {
#if USE_INT128
	*((uint128_t*) a) ^= *((uint128_t*) b);
#else
	uint64_t* x = (uint64_t*) a;
	const uint64_t* y = (const uint64_t*) b;
	x[0] ^= y[0];
	x[1] ^= y[1];
#endif
}
149
150
/* Out-of-place xor of one 16-byte AES block: dst = a ^ b. */
static inline void xor_blocks_dst(const uint8_t* a, const uint8_t* b, uint8_t* dst) {
#if USE_INT128
	*((uint128_t*) dst) = *((uint128_t*) a) ^ *((uint128_t*) b);
#else
	const uint64_t* x = (const uint64_t*) a;
	const uint64_t* y = (const uint64_t*) b;
	uint64_t* d = (uint64_t*) dst;
	d[0] = x[0] ^ y[0];
	d[1] = x[1] ^ y[1];
#endif
}
158
159
/* Variant-1 tweak (as adopted by monero in 2018): for variant == 1,
 * permute two bits of byte 11 of the block via a small lookup encoded
 * in the constant 0x75310. A no-op for any other variant. */
static void cryptolight_store_variant(void* state, int variant) {
	if (variant != 1)
		return;
	uint8_t* p = (uint8_t*) state;
	const uint8_t tmp = p[11];
	/* 2-bit table index built from bits 4, 3 and 0 of the byte */
	const uint8_t index = (uint8_t)((((tmp >> 3) & 6) | (tmp & 1)) << 1);
	p[11] = (uint8_t)(tmp ^ ((0x75310 >> index) & 0x30));
}
167
168
/* Per-thread cryptonight-light working state (dominated by the ~1 MiB
 * scratchpad); heap-allocated by callers because of its size. */
struct cryptonight_ctx {
	uint8_t _ALIGN(16) long_state[MEMORY];    /* memory-hard scratchpad */
	union cn_slow_hash_state state;           /* keccak state + key/init views */
	uint8_t _ALIGN(16) text[INIT_SIZE_BYTE];  /* 128-byte AES staging buffer */
	uint8_t _ALIGN(16) a[AES_BLOCK_SIZE];     /* mixing register block a */
	uint8_t _ALIGN(16) b[AES_BLOCK_SIZE];     /* mixing register block b */
	uint8_t _ALIGN(16) c[AES_BLOCK_SIZE];     /* mixing register block c */
	oaes_ctx* aes_ctx;                        /* OpenAES key schedule (owned) */
};
177
178
/* Core cryptonight-light hash, generic C AES path (no AES-NI).
 * Absorbs `len` bytes of `input` with Keccak, expands state.init into the
 * MEMORY-byte scratchpad, runs the memory-hard mixing loop (ITER/4
 * iterations of 4 half-rounds), folds the scratchpad back into the state,
 * permutes, and finalizes into 32-byte `output` via extra_hashes[].
 * `variant` != 0 enables the v1 tweak; it reads 8 bytes at input offset 35,
 * so callers must pass len >= 43 (they pass 76-byte block headers). */
static void cryptolight_hash_ctx(void* output, const void* input, int len, struct cryptonight_ctx* ctx, int variant)
{
	size_t i, j;

	hash_process(&ctx->state.hs, (const uint8_t*) input, len);
	/* NOTE(review): oaes_alloc() result is not checked here */
	ctx->aes_ctx = (oaes_ctx*) oaes_alloc();
	memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);

	/* v1 tweak value: input bytes 35..42 xored with keccak word 24 */
	const uint64_t tweak = variant ? *((uint64_t*) (((uint8_t*)input) + 35)) ^ ctx->state.hs.w[24] : 0;

	/* Phase 1: fill the scratchpad by repeatedly AES-rounding `text`
	 * (8 blocks per pass, loop unrolled) keyed on the first 32 state bytes */
	oaes_key_import_data(ctx->aes_ctx, ctx->state.hs.b, AES_KEY_SIZE);
	for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 0], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 1], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 2], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 3], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 4], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 5], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 6], ctx->aes_ctx->key->exp_data);
		aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 7], ctx->aes_ctx->key->exp_data);
		memcpy(&ctx->long_state[i], ctx->text, INIT_SIZE_BYTE);
	}

	/* Initial mixing registers: a = k[0..15]^k[32..47], b = k[16..31]^k[48..63] */
	xor_blocks_dst(&ctx->state.k[0], &ctx->state.k[32], ctx->a);
	xor_blocks_dst(&ctx->state.k[16], &ctx->state.k[48], ctx->b);

	/* Phase 2: memory-hard main loop (4 half-rounds per iteration) */
	for (i = 0; likely(i < ITER / 4); ++i) {
		/* Dependency chain: address -> read value ------+
		 * written value <-+ hard function (AES or MUL) <+
		 * next address  <-+
		 */
		/* Iteration 1 */
		j = e2i(ctx->a);
		aesb_single_round(&ctx->long_state[j], ctx->c, ctx->a);
		xor_blocks_dst(ctx->c, ctx->b, &ctx->long_state[j]);
		/* Iteration 2 */
		cryptolight_store_variant(&ctx->long_state[j], variant);
		mul_sum_xor_dst(ctx->c, ctx->a, &ctx->long_state[e2i(ctx->c)], variant, tweak);

		/* Iteration 3 */
		j = e2i(ctx->a);
		aesb_single_round(&ctx->long_state[j], ctx->b, ctx->a);
		xor_blocks_dst(ctx->b, ctx->c, &ctx->long_state[j]);
		/* Iteration 4 */
		cryptolight_store_variant(&ctx->long_state[j], variant);
		mul_sum_xor_dst(ctx->b, ctx->a, &ctx->long_state[e2i(ctx->b)], variant, tweak);
	}

	/* Phase 3: fold the scratchpad back into the state, keyed on state
	 * bytes 32..63 (xor then pseudo AES round, 8 blocks per pass) */
	memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
	oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
	for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
		xor_blocks(&ctx->text[0 * AES_BLOCK_SIZE], &ctx->long_state[i + 0 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[0 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[1 * AES_BLOCK_SIZE], &ctx->long_state[i + 1 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[1 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[2 * AES_BLOCK_SIZE], &ctx->long_state[i + 2 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[2 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[3 * AES_BLOCK_SIZE], &ctx->long_state[i + 3 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[3 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[4 * AES_BLOCK_SIZE], &ctx->long_state[i + 4 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[4 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[5 * AES_BLOCK_SIZE], &ctx->long_state[i + 5 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[5 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[6 * AES_BLOCK_SIZE], &ctx->long_state[i + 6 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[6 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[7 * AES_BLOCK_SIZE], &ctx->long_state[i + 7 * AES_BLOCK_SIZE]);
		aesb_pseudo_round_mut(&ctx->text[7 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
	}
	memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
	hash_permutation(&ctx->state.hs);
	/*memcpy(hash, &state, 32);*/
	/* low 2 bits of the permuted state pick the finalizer; the full
	 * 200-byte state is hashed into the 32-byte output */
	extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, output);
	oaes_free((OAES_CTX **) &ctx->aes_ctx);
}
252
253
void cryptolight_hash(void* output, const void* input) {
254
const int variant = 1;
255
struct cryptonight_ctx *ctx = (struct cryptonight_ctx*)malloc(sizeof(struct cryptonight_ctx));
256
cryptolight_hash_ctx(output, input, 76, ctx, variant);
257
free(ctx);
258
}
259
260
/* Core cryptonight-light hash, hardware-accelerated path: identical
 * structure to cryptolight_hash_ctx() but every AES call goes through the
 * fast_aesb_* entry points (AES-NI asm where available; on platforms
 * without asm these are #defined back to the generic C routines).
 * Same contract: `variant` != 0 reads 8 bytes at input offset 35, so
 * callers must pass len >= 43 (they pass 76-byte block headers). */
static void cryptolight_hash_ctx_aes_ni(void* output, const void* input, int len, struct cryptonight_ctx* ctx, int variant)
{
	size_t i, j;

	hash_process(&ctx->state.hs, (const uint8_t*)input, len);
	/* NOTE(review): oaes_alloc() result is not checked here */
	ctx->aes_ctx = (oaes_ctx*) oaes_alloc();
	memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);

	/* v1 tweak value: input bytes 35..42 xored with keccak word 24 */
	const uint64_t tweak = variant ? *((uint64_t*) (((uint8_t*)input) + 35)) ^ ctx->state.hs.w[24] : 0;

	/* Phase 1: fill the scratchpad (8 AES blocks per pass, unrolled) */
	oaes_key_import_data(ctx->aes_ctx, ctx->state.hs.b, AES_KEY_SIZE);
	for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 0], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 1], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 2], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 3], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 4], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 5], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 6], ctx->aes_ctx->key->exp_data);
		fast_aesb_pseudo_round_mut(&ctx->text[AES_BLOCK_SIZE * 7], ctx->aes_ctx->key->exp_data);
		memcpy(&ctx->long_state[i], ctx->text, INIT_SIZE_BYTE);
	}

	/* Initial mixing registers: a = k[0..15]^k[32..47], b = k[16..31]^k[48..63] */
	xor_blocks_dst(&ctx->state.k[0], &ctx->state.k[32], ctx->a);
	xor_blocks_dst(&ctx->state.k[16], &ctx->state.k[48], ctx->b);

	/* Phase 2: memory-hard main loop (4 half-rounds per iteration) */
	for (i = 0; likely(i < ITER / 4); ++i) {
		/* Dependency chain: address -> read value ------+
		 * written value <-+ hard function (AES or MUL) <+
		 * next address  <-+
		 */
		/* Iteration 1 */
		j = e2i(ctx->a);
		fast_aesb_single_round(&ctx->long_state[j], ctx->c, ctx->a);
		xor_blocks_dst(ctx->c, ctx->b, &ctx->long_state[j]);
		/* Iteration 2 */
		cryptolight_store_variant(&ctx->long_state[j], variant);
		mul_sum_xor_dst(ctx->c, ctx->a, &ctx->long_state[e2i(ctx->c)], variant, tweak);

		/* Iteration 3 */
		j = e2i(ctx->a);
		fast_aesb_single_round(&ctx->long_state[j], ctx->b, ctx->a);
		xor_blocks_dst(ctx->b, ctx->c, &ctx->long_state[j]);
		/* Iteration 4 */
		cryptolight_store_variant(&ctx->long_state[j], variant);
		mul_sum_xor_dst(ctx->b, ctx->a, &ctx->long_state[e2i(ctx->b)], variant, tweak);
	}

	/* Phase 3: fold the scratchpad back into the state, keyed on state
	 * bytes 32..63 (xor then pseudo AES round, 8 blocks per pass) */
	memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
	oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
	for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE) {
		xor_blocks(&ctx->text[0 * AES_BLOCK_SIZE], &ctx->long_state[i + 0 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[0 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[1 * AES_BLOCK_SIZE], &ctx->long_state[i + 1 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[1 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[2 * AES_BLOCK_SIZE], &ctx->long_state[i + 2 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[2 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[3 * AES_BLOCK_SIZE], &ctx->long_state[i + 3 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[3 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[4 * AES_BLOCK_SIZE], &ctx->long_state[i + 4 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[4 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[5 * AES_BLOCK_SIZE], &ctx->long_state[i + 5 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[5 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[6 * AES_BLOCK_SIZE], &ctx->long_state[i + 6 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[6 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
		xor_blocks(&ctx->text[7 * AES_BLOCK_SIZE], &ctx->long_state[i + 7 * AES_BLOCK_SIZE]);
		fast_aesb_pseudo_round_mut(&ctx->text[7 * AES_BLOCK_SIZE], ctx->aes_ctx->key->exp_data);
	}
	memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
	hash_permutation(&ctx->state.hs);
	/*memcpy(hash, &state, 32);*/
	/* low 2 bits of the permuted state pick the finalizer */
	extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, output);
	oaes_free((OAES_CTX **) &ctx->aes_ctx);
}
334
335
int scanhash_cryptolight(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done)
336
{
337
const int variant = 1; // since june 2018
338
uint32_t _ALIGN(128) hash[HASH_SIZE / 4];
339
uint32_t *pdata = work->data;
340
uint32_t *ptarget = work->target;
341
342
uint32_t *nonceptr = (uint32_t*) (((char*)pdata) + 39);
343
uint32_t n = *nonceptr - 1;
344
const uint32_t first_nonce = n + 1;
345
346
struct cryptonight_ctx *ctx = (struct cryptonight_ctx*)malloc(sizeof(struct cryptonight_ctx));
347
348
if (aes_ni_supported) {
349
do {
350
*nonceptr = ++n;
351
cryptolight_hash_ctx_aes_ni(hash, pdata, 76, ctx, variant);
352
if (unlikely(hash[7] < ptarget[7])) {
353
work_set_target_ratio(work, hash);
354
*hashes_done = n - first_nonce + 1;
355
free(ctx);
356
return 1;
357
}
358
} while (likely((n <= max_nonce && !work_restart[thr_id].restart)));
359
} else {
360
do {
361
*nonceptr = ++n;
362
cryptolight_hash_ctx(hash, pdata, 76, ctx, variant);
363
if (unlikely(hash[7] < ptarget[7])) {
364
work_set_target_ratio(work, hash);
365
*hashes_done = n - first_nonce + 1;
366
free(ctx);
367
return 1;
368
}
369
} while (likely((n <= max_nonce && !work_restart[thr_id].restart)));
370
}
371
372
free(ctx);
373
*hashes_done = n - first_nonce + 1;
374
return 0;
375
}
376
377
378