// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#define CR_RESET 0
#define CR_RESET_SET 1
#define CR_RESET_UNSET 0

#define CR_MESSAGE_LENGTH_H 0x4
#define CR_MESSAGE_LENGTH_L 0x8

#define CR_CONTROL 0xc
#define CR_CONTROL_BYTE_ORDER_3210 0
#define CR_CONTROL_BYTE_ORDER_0123 1
#define CR_CONTROL_BYTE_ORDER_2310 2
#define CR_CONTROL_BYTE_ORDER_1032 3
#define CR_CONTROL_BYTE_ORDER_SHIFT 8
#define CR_CONTROL_ALGO_MD5 0
#define CR_CONTROL_ALGO_SHA1 1
#define CR_CONTROL_ALGO_SHA224 2
#define CR_CONTROL_ALGO_SHA256 3

#define CR_INTSTAT 0x10
#define CR_INTENAB 0x14
#define CR_INTCLEAR 0x18
#define CR_INT_RESULTS_AVAILABLE BIT(0)
#define CR_INT_NEW_RESULTS_SET BIT(1)
#define CR_INT_RESULT_READ_ERR BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR BIT(3)
#define CR_INT_STATUS BIT(8)

#define CR_RESULT_QUEUE 0x1c
#define CR_RSD0 0x40
#define CR_CORE_REV 0x50
#define CR_CORE_DES1 0x60
#define CR_CORE_DES2 0x70

#define DRIVER_FLAGS_BUSY BIT(0)
#define DRIVER_FLAGS_FINAL BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY BIT(3)
#define DRIVER_FLAGS_INIT BIT(4)
#define DRIVER_FLAGS_CPU BIT(5)
#define DRIVER_FLAGS_DMA_READY BIT(6)
#define DRIVER_FLAGS_ERROR BIT(7)
#define DRIVER_FLAGS_SG BIT(8)
#define DRIVER_FLAGS_SHA1 BIT(18)
#define DRIVER_FLAGS_SHA224 BIT(19)
#define DRIVER_FLAGS_SHA256 BIT(20)
#define DRIVER_FLAGS_MD5 BIT(21)

#define IMG_HASH_QUEUE_LENGTH 20
#define IMG_HASH_DMA_BURST 4
#define IMG_HASH_DMA_THRESHOLD 64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
        struct img_hash_dev *hdev;
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        unsigned long flags;
        size_t digsize;

        dma_addr_t dma_addr;
        size_t dma_ct;

        /* sg root */
        struct scatterlist *sgfirst;
        /* walk state */
        struct scatterlist *sg;
        size_t nents;
        size_t offset;
        unsigned int total;
        size_t sent;

        unsigned long op;

        size_t bufcnt;
        struct ahash_request fallback_req;

        /* Zero length buffer must remain last member of struct */
        u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
        struct img_hash_dev *hdev;
        unsigned long flags;
        struct crypto_ahash *fallback;
};

struct img_hash_dev {
        struct list_head list;
        struct device *dev;
        struct clk *hash_clk;
        struct clk *sys_clk;
        void __iomem *io_base;

        phys_addr_t bus_addr;
        void __iomem *cpu_addr;

        spinlock_t lock;
        int err;
        struct tasklet_struct done_task;
        struct tasklet_struct dma_task;

        unsigned long flags;
        struct crypto_queue queue;
        struct ahash_request *req;

        struct dma_chan *dma_lch;
};

struct img_hash_drv {
        struct list_head dev_list;
        spinlock_t lock;
};

static struct img_hash_drv img_hash = {
        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
        return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
                                  u32 offset, u32 value)
{
        writel_relaxed(value, hdev->io_base + offset);
}

static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
        return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}

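/*
 * Program the byte order and selected algorithm into the control register
 * and start the hash operation.
 */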
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

        if (ctx->flags & DRIVER_FLAGS_MD5)
                cr |= CR_CONTROL_ALGO_MD5;
        else if (ctx->flags & DRIVER_FLAGS_SHA1)
                cr |= CR_CONTROL_ALGO_SHA1;
        else if (ctx->flags & DRIVER_FLAGS_SHA224)
                cr |= CR_CONTROL_ALGO_SHA224;
        else if (ctx->flags & DRIVER_FLAGS_SHA256)
                cr |= CR_CONTROL_ALGO_SHA256;
        dev_dbg(hdev->dev, "Starting hash process\n");
        img_hash_write(hdev, CR_CONTROL, cr);

        /*
         * The hardware block requires two cycles between writing the control
         * register and writing the first word of data in non DMA mode. To
         * ensure the first data write is not grouped in a burst with the
         * control register write, a read is issued to 'flush' the bus.
         */
        if (!dma)
                img_hash_read(hdev, CR_CONTROL);
}

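/* Feed data to the hash block one 32-bit word at a time via the CPU write port. */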
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
                             size_t length, int final)
{
        u32 count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

        if (final)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                writel_relaxed(buffer[count], hdev->cpu_addr);

        return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
        struct img_hash_dev *hdev = data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->bufcnt) {
                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
                ctx->bufcnt = 0;
        }
        if (ctx->sg)
                tasklet_schedule(&hdev->dma_task);
}

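/* Map the scatterlist entry and queue a mem-to-device DMA transfer to the write port. */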
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
        if (ctx->dma_ct == 0) {
                dev_err(hdev->dev, "Invalid DMA sg\n");
                hdev->err = -EINVAL;
                return -EINVAL;
        }

        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
                                       sg,
                                       ctx->dma_ct,
                                       DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(hdev->dev, "Null DMA descriptor\n");
                hdev->err = -EINVAL;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
                return -EINVAL;
        }
        desc->callback = img_hash_dma_callback;
        desc->callback_param = hdev;
        dmaengine_submit(desc);
        dma_async_issue_pending(hdev->dma_lch);

        return 0;
}

static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
                                        ctx->buffer, hdev->req->nbytes);

        ctx->total = hdev->req->nbytes;
        ctx->bufcnt = 0;

        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

        img_hash_start(hdev, false);

        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return -EINVAL;

        memcpy(req->result, ctx->digest, ctx->digsize);

        return 0;
}

static void img_hash_copy_hash(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        __be32 *hash = (__be32 *)ctx->digest;
        int i;

        for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
                hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = ctx->hdev;

        if (!err) {
                img_hash_copy_hash(req);
                if (DRIVER_FLAGS_FINAL & hdev->flags)
                        err = img_hash_finish(req);
        } else {
                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
                ctx->flags |= DRIVER_FLAGS_ERROR;
        }

        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

        if (req->base.complete)
                ahash_request_complete(req, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        img_hash_start(hdev, true);

        dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

        if (!ctx->total)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

        tasklet_schedule(&hdev->dma_task);

        return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
        struct dma_slave_config dma_conf;
        int err;

        hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
        if (IS_ERR(hdev->dma_lch)) {
                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return PTR_ERR(hdev->dma_lch);
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->bus_addr;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
        dma_conf.device_fc = false;

        err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
        if (err) {
                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
                dma_release_channel(hdev->dma_lch);
                return err;
        }

        return 0;
}

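/*
 * Tasklet that walks the request scatterlist and issues word-aligned DMA
 * transfers, carrying any leftover bytes over in ctx->buffer.
 */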
static void img_hash_dma_task(unsigned long d)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
        struct img_hash_request_ctx *ctx;
        u8 *addr;
        size_t nbytes, bleft, wsend, len, tbc;
        struct scatterlist tsg;

        if (!hdev->req)
                return;

        ctx = ahash_request_ctx(hdev->req);
        if (!ctx->sg)
                return;

        addr = sg_virt(ctx->sg);
        nbytes = ctx->sg->length - ctx->offset;

        /*
         * The hash accelerator does not support a data valid mask. This means
         * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
         * padding bytes in the last word written by that dma would erroneously
         * be included in the hash. To avoid this we round down the transfer,
         * and add the excess to the start of the next dma. It does not matter
         * that the final dma may not be a multiple of 4 bytes as the hashing
         * block is programmed to accept the correct number of bytes.
         */

        bleft = nbytes % 4;
        wsend = (nbytes / 4);

        if (wsend) {
                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
                if (img_hash_xmit_dma(hdev, &tsg)) {
                        dev_err(hdev->dev, "DMA failed, falling back to CPU");
                        ctx->flags |= DRIVER_FLAGS_CPU;
                        hdev->err = 0;
                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
                                          wsend * 4, 0);
                        ctx->sent += wsend * 4;
                        wsend = 0;
                } else {
                        ctx->sent += wsend * 4;
                }
        }

        if (bleft) {
                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer, bleft, ctx->sent);
                tbc = 0;
                ctx->sg = sg_next(ctx->sg);
                while (ctx->sg && (ctx->bufcnt < 4)) {
                        len = ctx->sg->length;
                        if (likely(len > (4 - ctx->bufcnt)))
                                len = 4 - ctx->bufcnt;
                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer + ctx->bufcnt, len,
                                                 ctx->sent + ctx->bufcnt);
                        ctx->bufcnt += tbc;
                        if (tbc >= ctx->sg->length) {
                                ctx->sg = sg_next(ctx->sg);
                                tbc = 0;
                        }
                }

                ctx->sent += ctx->bufcnt;
                ctx->offset = tbc;

                if (!wsend)
                        img_hash_dma_callback(hdev);
        } else {
                ctx->offset = 0;
                ctx->sg = sg_next(ctx->sg);
        }
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->flags & DRIVER_FLAGS_SG)
                dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);

        return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
        struct ahash_request *req = hdev->req;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        int err = 0;

        ctx->bufcnt = 0;

        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
                dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
                        req->nbytes);
                err = img_hash_write_via_dma(hdev);
        } else {
                dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
                        req->nbytes);
                err = img_hash_write_via_cpu(hdev);
        }
        return err;
}

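/*
 * Reset the hash block, enable the 'new results' interrupt and program the
 * total message length in bits.
 */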
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
        unsigned long long nbits;
        u32 u, l;

        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

        nbits = (u64)hdev->req->nbytes << 3;
        u = nbits >> 32;
        l = nbits;
        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
                hdev->flags |= DRIVER_FLAGS_INIT;
                hdev->err = 0;
        }
        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
        return 0;
}

static int img_hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        ahash_request_set_callback(&rctx->fallback_req,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                   req->base.complete, req->base.data);

        return crypto_ahash_init(&rctx->fallback_req);
}

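/*
 * Enqueue a request and, if the device is not already busy, dequeue the next
 * request and start processing it.
 */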
static int img_hash_handle_queue(struct img_hash_dev *hdev,
                                 struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct img_hash_request_ctx *ctx;
        unsigned long flags;
        int err = 0, res = 0;

        spin_lock_irqsave(&hdev->lock, flags);

        if (req)
                res = ahash_enqueue_request(&hdev->queue, req);

        if (DRIVER_FLAGS_BUSY & hdev->flags) {
                spin_unlock_irqrestore(&hdev->lock, flags);
                return res;
        }

        backlog = crypto_get_backlog(&hdev->queue);
        async_req = crypto_dequeue_request(&hdev->queue);
        if (async_req)
                hdev->flags |= DRIVER_FLAGS_BUSY;

        spin_unlock_irqrestore(&hdev->lock, flags);

        if (!async_req)
                return res;

        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        hdev->req = req;

        ctx = ahash_request_ctx(req);

        dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
                 ctx->op, req->nbytes);

        err = img_hash_hw_init(hdev);

        if (!err)
                err = img_hash_process_data(hdev);

        if (err != -EINPROGRESS) {
                /* done_task will not finish so do it here */
                img_hash_finish_req(req, err);
        }
        return res;
}

static int img_hash_update(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        ahash_request_set_callback(&rctx->fallback_req,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                   req->base.complete, req->base.data);
        ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);

        return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        ahash_request_set_callback(&rctx->fallback_req,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                   req->base.complete, req->base.data);
        ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);

        return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        ahash_request_set_callback(&rctx->fallback_req,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                   req->base.complete, req->base.data);
        ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
                                req->nbytes);

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        ahash_request_set_callback(&rctx->fallback_req,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                   req->base.complete, req->base.data);

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        ahash_request_set_callback(&rctx->fallback_req,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                   req->base.complete, req->base.data);

        return crypto_ahash_export(&rctx->fallback_req, out);
}

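/*
 * digest() is the only entry point serviced by the hardware; init/update/
 * final/finup and import/export are delegated to the software fallback.
 */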
static int img_hash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = NULL;
        struct img_hash_dev *tmp;
        int err;

        spin_lock(&img_hash.lock);
        if (!tctx->hdev) {
                list_for_each_entry(tmp, &img_hash.dev_list, list) {
                        hdev = tmp;
                        break;
                }
                tctx->hdev = hdev;

        } else {
                hdev = tctx->hdev;
        }

        spin_unlock(&img_hash.lock);
        ctx->hdev = hdev;
        ctx->flags = 0;
        ctx->digsize = crypto_ahash_digestsize(tfm);

        switch (ctx->digsize) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA256;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA224;
                break;
        case MD5_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_MD5;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->offset = 0;
        ctx->sent = 0;
        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->sgfirst = req->src;
        ctx->nents = sg_nents(ctx->sg);

        err = img_hash_handle_queue(tctx->hdev, req);

        return err;
}

static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("img_hash: Could not load fallback driver.\n");
                return PTR_ERR(ctx->fallback);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct img_hash_request_ctx) +
                                 crypto_ahash_reqsize(ctx->fallback) +
                                 IMG_HASH_DMA_THRESHOLD);

        return 0;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha1-lib");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha224-lib");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha256-lib");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tctx->fallback);
}

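/* Acknowledge hash block interrupts; on new results, schedule the done tasklet. */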
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
        struct img_hash_dev *hdev = dev_id;
        u32 reg;

        reg = img_hash_read(hdev, CR_INTSTAT);
        img_hash_write(hdev, CR_INTCLEAR, reg);

        if (reg & CR_INT_NEW_RESULTS_SET) {
                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
                if (DRIVER_FLAGS_BUSY & hdev->flags) {
                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
                        tasklet_schedule(&hdev->done_task);
                } else {
                        dev_warn(hdev->dev,
                                 "HASH interrupt when no active requests.\n");
                }
        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
                dev_warn(hdev->dev,
                         "IRQ triggered before the hash had completed\n");
        } else if (reg & CR_INT_RESULT_READ_ERR) {
                dev_warn(hdev->dev,
                         "Attempt to read from an empty result queue\n");
        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
                dev_warn(hdev->dev,
                         "Data written before the hardware was configured\n");
        }
        return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "img-md5",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_md5_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "img-sha1",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha1_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "img-sha224",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha224_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "img-sha256",
                                .cra_priority = 300,
                                .cra_flags =
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha256_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

static int img_register_algs(struct img_hash_dev *hdev)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
                err = crypto_register_ahash(&img_algs[i]);
                if (err)
                        goto err_reg;
        }
        return 0;

err_reg:
        for (; i--; )
                crypto_unregister_ahash(&img_algs[i]);

        return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
                crypto_unregister_ahash(&img_algs[i]);
        return 0;
}

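/*
 * Completion tasklet: finishes the current request once its output is ready,
 * or kicks the queue when the device is idle.
 */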
static void img_hash_done_task(unsigned long data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        int err = 0;

        if (hdev->err == -EINVAL) {
                err = hdev->err;
                goto finish;
        }

        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
                img_hash_handle_queue(hdev, NULL);
                return;
        }

        if (DRIVER_FLAGS_CPU & hdev->flags) {
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
                        img_hash_write_via_dma_stop(hdev);
                        if (hdev->err) {
                                err = hdev->err;
                                goto finish;
                        }
                }
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
                                        DRIVER_FLAGS_OUTPUT_READY);
                        goto finish;
                }
        }
        return;

finish:
        img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] __maybe_unused = {
        { .compatible = "img,hash-accelerator" },
        {}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;
        struct device *dev = &pdev->dev;
        struct resource *hash_res;
        int irq;
        int err;

        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return -ENOMEM;

        spin_lock_init(&hdev->lock);

        hdev->dev = dev;

        platform_set_drvdata(pdev, hdev);

        INIT_LIST_HEAD(&hdev->list);

        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

        /* Register bank */
        hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
                goto res_err;
        }

        /* Write port (DMA or CPU) */
        hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
        hdev->bus_addr = hash_res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = irq;
                goto res_err;
        }

        err = devm_request_irq(dev, irq, img_irq_handler, 0,
                               dev_name(dev), hdev);
        if (err) {
                dev_err(dev, "unable to request irq\n");
                goto res_err;
        }
        dev_dbg(dev, "using IRQ channel %d\n", irq);

        hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash");
        if (IS_ERR(hdev->hash_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->hash_clk);
                goto res_err;
        }

        hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys");
        if (IS_ERR(hdev->sys_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->sys_clk);
                goto res_err;
        }

        err = img_hash_dma_init(hdev);
        if (err)
                goto res_err;

        dev_dbg(dev, "using %s for DMA transfers\n",
                dma_chan_name(hdev->dma_lch));

        spin_lock(&img_hash.lock);
        list_add_tail(&hdev->list, &img_hash.dev_list);
        spin_unlock(&img_hash.lock);

        err = img_register_algs(hdev);
        if (err)
                goto err_algs;
        dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

        return 0;

err_algs:
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);
        dma_release_channel(hdev->dma_lch);
res_err:
        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        return err;
}

static void img_hash_remove(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;

        hdev = platform_get_drvdata(pdev);
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);

        img_unregister_algs(hdev);

        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        dma_release_channel(hdev->dma_lch);
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

static int img_hash_resume(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(hdev->hash_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(hdev->sys_clk);
        if (ret) {
                clk_disable_unprepare(hdev->hash_clk);
                return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
        .probe = img_hash_probe,
        .remove = img_hash_remove,
        .driver = {
                .name = "img-hash-accelerator",
                .pm = &img_hash_pm_ops,
                .of_match_table = img_hash_match,
        }
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <[email protected]>");