GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/marvell/cesa/hash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <[email protected]>
 * Author: Arnaud Ebalard <[email protected]>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine = creq->base.engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);

	atomic_sub(req->nbytes, &engine->load);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
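
For reference, a self-contained sketch (not part of the driver) of the trailer that mv_cesa_ahash_pad_req() emits, assuming the 64-byte block size shared by MD5, SHA1 and SHA256: a 0x80 marker byte, zero fill up to 56 mod 64, then the message length in bits as a 64-bit value, little-endian for MD5 and big-endian for SHA.

/* Illustrative sketch only -- mirrors the padding math above. */
static unsigned int demo_pad_trailer(u64 msglen, u8 buf[72], bool algo_le)
{
	unsigned int index = msglen & 63;	/* bytes used in last block */
	unsigned int padlen = (index < 56) ? (56 - index) : (120 - index);
	u64 bits = msglen << 3;			/* message length in bits */
	int i;

	buf[0] = 0x80;				/* mandatory first padding byte */
	memset(buf + 1, 0, padlen - 1);		/* zero fill to 56 mod 64 */
	for (i = 0; i < 8; i++)			/* append the length field */
		buf[padlen + i] = algo_le ?
			(bits >> (8 * i)) & 0xff :	/* MD5: LE */
			(bits >> (8 * (7 - i))) & 0xff;	/* SHA: BE */

	return padlen + 8;			/* total trailer size */
}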

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	if (engine->pool)
		memcpy(engine->sram_pool, &creq->op_tmpl,
		       sizeof(creq->op_tmpl));
	else
		memcpy_toio(engine->sram, &creq->op_tmpl,
			    sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr) {
		if (engine->pool)
			memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
			       creq->cache, creq->cache_ptr);
		else
			memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				    creq->cache, creq->cache_ptr);
	}

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += mv_cesa_sg_copy_to_sram(
			engine, req->src, creq->src_nents,
			CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
			len - creq->cache_ptr, sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				if (engine->pool)
					memcpy(creq->cache,
					       engine->sram_pool +
					       CESA_SA_DATA_SRAM_OFFSET + len,
					       new_cache_ptr);
				else
					memcpy_fromio(creq->cache,
						      engine->sram +
						      CESA_SA_DATA_SRAM_OFFSET +
						      len,
						      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				if (engine->pool)
					memcpy(engine->sram_pool + len +
					       CESA_SA_DATA_SRAM_OFFSET,
					       creq->cache, i);
				else
					memcpy_toio(engine->sram + len +
						    CESA_SA_DATA_SRAM_OFFSET,
						    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	if (engine->pool)
		memcpy(engine->sram_pool, op, sizeof(*op));
	else
		memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	    CESA_TDMA_RESULT) {
		const void *data;

		/*
		 * The result is already in the correct endianness when the
		 * SA is used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * The hardware's MD5 digest is in little endian
			 * format, but SHA is in big endian format.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
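
A worked trace of the caching rule above may help, assuming CESA_MAX_HASH_BLOCK_SIZE is the 64-byte block size:

/*
 * update(10 bytes): 0 + 10 < 64  -> cached, cache_ptr = 10
 * update(20 bytes): 10 + 20 < 64 -> cached, cache_ptr = 30
 * update(40 bytes): 30 + 40 >= 64 -> not cached; the cache plus the new
 *                   data are sent to the engine, and any sub-block
 *                   remainder is copied back into the cache afterwards
 *                   (see mv_cesa_ahash_req_cleanup() above).
 */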

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
				      CESA_SA_DESC_CFG_NOT_FRAG :
				      CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.base.len > iter.src.op_offset) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
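
As a hedged illustration of how these two callbacks get exercised (not part of the driver): a caller can snapshot a partial hash with crypto_ahash_export() and resume it later with crypto_ahash_import(). The "md5" transform name and the demo_* helper are only examples; error handling is elided and buffers are assumed DMA-able (e.g. kmalloc'd).

/* Illustrative sketch only -- split a hash across two submissions. */
static int demo_split_hash(const u8 *part1, unsigned int len1,
			   const u8 *part2, unsigned int len2, u8 *digest)
{
	struct crypto_ahash *tfm = crypto_alloc_ahash("md5", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	struct md5_state state;		/* matches the alg's statesize */
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Hash the first chunk, then snapshot the partial state. */
	sg_init_one(&sg, part1, len1);
	ahash_request_set_crypt(req, &sg, NULL, len1);
	crypto_wait_req(crypto_ahash_init(req), &wait);
	crypto_wait_req(crypto_ahash_update(req), &wait);
	crypto_ahash_export(req, &state);

	/* Possibly much later: restore the state and finish the hash. */
	crypto_ahash_import(req, &state);
	sg_init_one(&sg, part2, len2);
	ahash_request_set_crypt(req, &sg, digest, len2);
	crypto_wait_req(crypto_ahash_finup(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return 0;
}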

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
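
For context, a minimal usage sketch (not part of the driver): a one-shot digest through the generic ahash API. A request for "md5" may resolve to the "mv-md5" implementation registered above when CESA hardware is present; error handling is elided and the data buffer is assumed DMA-able.

/* Illustrative sketch only -- one-shot digest via the ahash API. */
static int demo_md5_digest(const void *data, unsigned int len,
			   u8 out[MD5_DIGEST_SIZE])
{
	struct crypto_ahash *tfm = crypto_alloc_ahash("md5", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* digest = init + update + final in one asynchronous call */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}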

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	DECLARE_CRYPTO_WAIT(result);
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	ret = crypto_wait_req(ret, &result);

	if (ret)
		return ret;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	DECLARE_CRYPTO_WAIT(result);
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);

		ret = crypto_ahash_digest(req);
		ret = crypto_wait_req(ret, &result);

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
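
A minimal sketch (not part of the driver) of the RFC 2104 key schedule that mv_cesa_ahmac_pad_init() implements, shown for the simple case of a key that already fits in one block; longer keys are first hashed down to digest size, exactly as the function above does.

/* Illustrative sketch only -- RFC 2104 inner/outer pad derivation. */
static void demo_hmac_pads(const u8 *key, unsigned int keylen,
			   u8 *ipad, u8 *opad, unsigned int blocksize)
{
	unsigned int i;

	/* Assumes keylen <= blocksize; longer keys are hashed down first. */
	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}
}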

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 0,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};