GitHub Repository: torvalds/linux
Path: drivers/crypto/inside-secure/eip93/eip93-hash.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024
 *
 * Christian Marangi <[email protected]>
 */

#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/hmac.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-main.h"
#include "eip93-common.h"
#include "eip93-regs.h"

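/*
 * Unmap and free every data block queued on the request; the final data
 * buffer is also unmapped if the request was finalized.
 */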
static void eip93_hash_free_data_blocks(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct mkt_hash_block *block, *tmp;

	list_for_each_entry_safe(block, tmp, &rctx->blocks, list) {
		dma_unmap_single(eip93->dev, block->data_dma,
				 SHA256_BLOCK_SIZE, DMA_TO_DEVICE);
		kfree(block);
	}
	if (!list_empty(&rctx->blocks))
		INIT_LIST_HEAD(&rctx->blocks);

	if (rctx->finalize)
		dma_unmap_single(eip93->dev, rctx->data_dma,
				 rctx->data_used,
				 DMA_TO_DEVICE);
}

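/* Unmap the sa_record for the request, plus the HMAC copy if HMAC is in use. */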
static void eip93_hash_free_sa_record(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;

	if (IS_HMAC(ctx->flags))
		dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base,
				 sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);

	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(rctx->sa_record), DMA_TO_DEVICE);
}

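/*
 * Result handler: unmap the sa_state, copy the (byte-swapped) digest to
 * req->result for a finalized or partial hash, release the request
 * resources and signal completion.
 */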
void eip93_hash_handle_result(struct crypto_async_request *async, int err)
{
	struct ahash_request *req = ahash_request_cast(async);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int i;

	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_FROM_DEVICE);

	/*
	 * With partial_hash, assume a SHA256_DIGEST_SIZE buffer is passed.
	 * This handles SHA224, which has a 32-byte intermediate digest.
	 */
	if (rctx->partial_hash)
		digestsize = SHA256_DIGEST_SIZE;

	if (rctx->finalize || rctx->partial_hash) {
		/* Bytes need to be swapped for req->result */
		if (!IS_HASH_MD5(ctx->flags)) {
			for (i = 0; i < digestsize / sizeof(u32); i++) {
				u32 *digest = (u32 *)sa_state->state_i_digest;

				digest[i] = be32_to_cpu((__be32 __force)digest[i]);
			}
		}

		memcpy(req->result, sa_state->state_i_digest, digestsize);
	}

	eip93_hash_free_sa_record(req);
	eip93_hash_free_data_blocks(req);

	ahash_request_complete(req, err);
}

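/* Load the standard init vector of the selected hash into the sa_state digest. */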
static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
	static const u32 sha256_init[] = {
		SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
		SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
	};
	static const u32 sha224_init[] = {
		SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
		SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
	};
	static const u32 sha1_init[] = {
		SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
	};
	static const u32 md5_init[] = {
		MD5_H0, MD5_H1, MD5_H2, MD5_H3
	};

	/* Init HASH constant */
	switch (hash) {
	case EIP93_HASH_SHA256:
		memcpy(digest, sha256_init, sizeof(sha256_init));
		return;
	case EIP93_HASH_SHA224:
		memcpy(digest, sha224_init, sizeof(sha224_init));
		return;
	case EIP93_HASH_SHA1:
		memcpy(digest, sha1_init, sizeof(sha1_init));
		return;
	case EIP93_HASH_MD5:
		memcpy(digest, md5_init, sizeof(md5_init));
		return;
	default: /* Impossible */
		return;
	}
}

static void eip93_hash_export_sa_state(struct ahash_request *req,
				       struct eip93_hash_export_state *state)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct sa_state *sa_state = &rctx->sa_state;

	/*
	 * EIP93 has special handling for state_byte_cnt in sa_state.
	 * Even if a zero packet is passed (and a BADMSG is returned),
	 * state_byte_cnt is incremented to the digest handled (by the hash
	 * primitive). This is problematic with export/import, as EIP93
	 * expects a state_byte_cnt of 0 for the very first iteration.
	 */
	if (!rctx->len)
		memset(state->state_len, 0, sizeof(u32) * 2);
	else
		memcpy(state->state_len, sa_state->state_byte_cnt,
		       sizeof(u32) * 2);
	memcpy(state->state_hash, sa_state->state_i_digest,
	       SHA256_DIGEST_SIZE);
	state->len = rctx->len;
	state->data_used = rctx->data_used;
}

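/*
 * Prepare the sa_record for a basic outbound hash operation and reset the
 * per-request state. For HMAC, a duplicate sa_record with CMD_HMAC kept
 * enabled is prepared for the final block (see below).
 */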
static void __eip93_hash_init(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	int digestsize;

	digestsize = crypto_ahash_digestsize(ahash);

	eip93_set_sa_record(sa_record, 0, ctx->flags);
	sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE;
	sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH;
	sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE;
	sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE,
					      EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH);
	sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH;
	sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH,
					      digestsize / sizeof(u32));

	/*
	 * HMAC special handling
	 * Enabling CMD_HMAC forces the inner hash to always be finalized.
	 * This causes problems when handling messages > 64 bytes, as we
	 * need to produce intermediate inner hashes when sending the
	 * intermediate 64-byte blocks.
	 *
	 * To handle this, enable CMD_HMAC only on the last block.
	 * We make a duplicate of sa_record and, on the last descriptor,
	 * we pass a dedicated sa_record with CMD_HMAC enabled to make
	 * EIP93 apply the outer hash.
	 */
	if (IS_HMAC(ctx->flags)) {
		struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;

		memcpy(sa_record_hmac, sa_record, sizeof(*sa_record));
		/* Copy pre-hashed opad for HMAC */
		memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE);

		/* Disable HMAC for the normal hash sa_record */
		sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC;
	}

	rctx->len = 0;
	rctx->data_used = 0;
	rctx->partial_hash = false;
	rctx->finalize = false;
	INIT_LIST_HEAD(&rctx->blocks);
}

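/*
 * Map one block of data and queue a command descriptor for it. For the
 * last descriptor of a request, register the async request in the IDR
 * and, when finalizing, request the final hash (with the HMAC sa_record
 * if needed).
 */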
static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data,
			       dma_addr_t *data_dma, u32 len, bool last)
{
	struct ahash_request *req = ahash_request_cast(async);
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct eip93_device *eip93 = ctx->eip93;
	struct eip93_descriptor cdesc = { };
	dma_addr_t src_addr;
	int ret;

	/* Map block data to DMA */
	src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, src_addr);
	if (ret)
		return ret;

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	cdesc.state_addr = rctx->sa_state_base;
	cdesc.src_addr = src_addr;
	cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
					  EIP93_PE_LENGTH_HOST_READY);
	cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH,
					   len);

	cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH);

	if (last) {
		int crypto_async_idr;

		if (rctx->finalize && !rctx->partial_hash) {
			/* For the last block, pass the sa_record with CMD_HMAC enabled */
			if (IS_HMAC(ctx->flags)) {
				struct sa_record *sa_record_hmac = &rctx->sa_record_hmac;

				rctx->sa_record_hmac_base = dma_map_single(eip93->dev,
									   sa_record_hmac,
									   sizeof(*sa_record_hmac),
									   DMA_TO_DEVICE);
				ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base);
				if (ret)
					return ret;

				cdesc.sa_addr = rctx->sa_record_hmac_base;
			}

			cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL;
		}

		scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
			crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
						     EIP93_RING_NUM - 1, GFP_ATOMIC);

		cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
				 FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST);
	}

again:
	scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
		ret = eip93_put_descriptor(eip93, &cdesc);
	if (ret) {
		usleep_range(EIP93_RING_BUSY_DELAY,
			     EIP93_RING_BUSY_DELAY * 2);
		goto again;
	}

	/* Writing the new descriptor count starts the DMA action */
	writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);

	*data_dma = src_addr;
	return 0;
}

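/* Reset the hash state; for HMAC, queue the ipad as the initial block. */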
static int eip93_hash_init(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_state *sa_state = &rctx->sa_state;

	memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2);
	eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK,
					sa_state->state_i_digest);

	__eip93_hash_init(req);

	/* For HMAC, set up the initial block with the ipad */
	if (IS_HMAC(ctx->flags)) {
		memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE);

		rctx->data_used = SHA256_BLOCK_SIZE;
		rctx->len += SHA256_BLOCK_SIZE;
	}

	return 0;
}

/*
 * With complete_req true, we wait for the engine to consume all the blocks
 * in the list; otherwise we just queue the blocks to the engine, as final()
 * will wait. This is useful for finup().
 */
static int __eip93_hash_update(struct ahash_request *req, bool complete_req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_async_request *async = &req->base;
	unsigned int read, to_consume = req->nbytes;
	unsigned int max_read, consumed = 0;
	struct mkt_hash_block *block;
	bool wait_req = false;
	int offset;
	int ret;

	/* Get the offset and available space to fill req data */
	offset = rctx->data_used;
	max_read = SHA256_BLOCK_SIZE - offset;

	/* Consume req in blocks of SHA256_BLOCK_SIZE.
	 * max_read is initially set to the space available in the req data
	 * and then reset to SHA256_BLOCK_SIZE.
	 */
	while (to_consume > max_read) {
		block = kzalloc(sizeof(*block), GFP_ATOMIC);
		if (!block) {
			ret = -ENOMEM;
			goto free_blocks;
		}

		read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
					  block->data + offset,
					  max_read, consumed);

		/*
		 * For the first iteration only, copy req data to the block
		 * and reset offset and max_read for the next iteration.
		 */
		if (offset > 0) {
			memcpy(block->data, rctx->data, offset);
			offset = 0;
			max_read = SHA256_BLOCK_SIZE;
		}

		list_add(&block->list, &rctx->blocks);
		to_consume -= read;
		consumed += read;
	}

	/* Write the remaining data to req data */
	read = sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				  rctx->data + offset, to_consume,
				  consumed);
	rctx->data_used = offset + read;

	/* Update counter with processed bytes */
	rctx->len += read + consumed;

	/* Consume all the blocks added to the list */
	list_for_each_entry_reverse(block, &rctx->blocks, list) {
		wait_req = complete_req &&
			   list_is_first(&block->list, &rctx->blocks);

		ret = eip93_send_hash_req(async, block->data,
					  &block->data_dma,
					  SHA256_BLOCK_SIZE, wait_req);
		if (ret)
			goto free_blocks;
	}

	return wait_req ? -EINPROGRESS : 0;

free_blocks:
	eip93_hash_free_data_blocks(req);

	return ret;
}

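/* Map sa_state and sa_record, then hand the new request data to the engine. */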
static int eip93_hash_update(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	if (!req->nbytes)
		return 0;

	rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
					     sizeof(*sa_state),
					     DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (ret)
		return ret;

	rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
					      sizeof(*sa_record),
					      DMA_TO_DEVICE);
	ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
	if (ret)
		goto free_sa_state;

	ret = __eip93_hash_update(req, true);
	if (ret && ret != -EINPROGRESS)
		goto free_sa_record;

	return ret;

free_sa_record:
	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);

free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

/*
 * With map_dma true, we map the sa_record and sa_state here. finup()
 * passes false, as they are already mapped before it calls update().
 */
static int __eip93_hash_final(struct ahash_request *req, bool map_dma)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_async_request *async = &req->base;
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	/* EIP93 can't hash a zero-length message */
	if (!rctx->len && !IS_HMAC(ctx->flags)) {
		switch ((ctx->flags & EIP93_HASH_MASK)) {
		case EIP93_HASH_SHA256:
			memcpy(req->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
			break;
		case EIP93_HASH_SHA224:
			memcpy(req->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
			break;
		case EIP93_HASH_SHA1:
			memcpy(req->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
			break;
		case EIP93_HASH_MD5:
			memcpy(req->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
			break;
		default: /* Impossible */
			return -EINVAL;
		}

		return 0;
	}

	/* Signal that the interrupt from the engine is for the last block */
	rctx->finalize = true;

	if (map_dma) {
		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
						     sizeof(*sa_state),
						     DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
		if (ret)
			return ret;

		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
						      sizeof(*sa_record),
						      DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
		if (ret)
			goto free_sa_state;
	}

	/* Send the last block */
	ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma,
				  rctx->data_used, true);
	if (ret)
		goto free_blocks;

	return -EINPROGRESS;

free_blocks:
	eip93_hash_free_data_blocks(req);

	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);

free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

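/* Send the buffered data as the last block, mapping sa_state and sa_record first. */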
static int eip93_hash_final(struct ahash_request *req)
{
	return __eip93_hash_final(req, true);
}

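/* finup(): push the remaining update data, then finalize without remapping. */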
static int eip93_hash_finup(struct ahash_request *req)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sa_record *sa_record = &rctx->sa_record;
	struct sa_state *sa_state = &rctx->sa_state;
	struct eip93_device *eip93 = ctx->eip93;
	int ret;

	if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) {
		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
						     sizeof(*sa_state),
						     DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
		if (ret)
			return ret;

		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
						      sizeof(*sa_record),
						      DMA_TO_DEVICE);
		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
		if (ret)
			goto free_sa_state;

		ret = __eip93_hash_update(req, false);
		if (ret)
			goto free_sa_record;
	}

	return __eip93_hash_final(req, false);

free_sa_record:
	dma_unmap_single(eip93->dev, rctx->sa_record_base,
			 sizeof(*sa_record), DMA_TO_DEVICE);
free_sa_state:
	dma_unmap_single(eip93->dev, rctx->sa_state_base,
			 sizeof(*sa_state), DMA_TO_DEVICE);

	return ret;
}

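/* Derive the HMAC ipad and pre-hashed opad for the given key. */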
static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
				  u32 keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize,
				 ctx->ipad, ctx->opad, true);
}

static int eip93_hash_cra_init(struct crypto_tfm *tfm)
{
	struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg,
				struct eip93_alg_template, alg.ahash.halg.base);

	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct eip93_hash_reqctx));

	ctx->eip93 = tmpl->eip93;
	ctx->flags = tmpl->flags;

	return 0;
}

static int eip93_hash_digest(struct ahash_request *req)
{
	int ret;

	ret = eip93_hash_init(req);
	if (ret)
		return ret;

	return eip93_hash_finup(req);
}

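/* Restore a previously exported hash state into the request context. */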
static int eip93_hash_import(struct ahash_request *req, const void *in)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	const struct eip93_hash_export_state *state = in;
	struct sa_state *sa_state = &rctx->sa_state;

	memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2);
	memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE);

	__eip93_hash_init(req);

	rctx->len = state->len;
	rctx->data_used = state->data_used;

	/* Skip copying data if we have nothing to copy */
	if (rctx->len)
		memcpy(rctx->data, state->data, rctx->data_used);

	return 0;
}

static int eip93_hash_export(struct ahash_request *req, void *out)
{
	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
	struct eip93_hash_export_state *state = out;

	/* Save the first block in state data */
	if (rctx->len)
		memcpy(state->data, rctx->data, rctx->data_used);

	eip93_hash_export_sa_state(req, state);

	return 0;
}

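/* Hash and HMAC algorithm templates exposed by this driver. */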
struct eip93_alg_template eip93_alg_md5 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_MD5,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha1 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA1,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha224 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA224,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_sha256 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_SHA256,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-eip93",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_md5 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_MD5,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac(md5-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha1 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha224 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

struct eip93_alg_template eip93_alg_hmac_sha256 = {
	.type = EIP93_ALG_TYPE_HASH,
	.flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256,
	.alg.ahash = {
		.init = eip93_hash_init,
		.update = eip93_hash_update,
		.final = eip93_hash_final,
		.finup = eip93_hash_finup,
		.digest = eip93_hash_digest,
		.setkey = eip93_hash_hmac_setkey,
		.export = eip93_hash_export,
		.import = eip93_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct eip93_hash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-eip93)",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct eip93_hash_ctx),
				.cra_init = eip93_hash_cra_init,
				.cra_module = THIS_MODULE,
			},
		},
	},
};