GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/crypto/omap-sham.c
/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>
#include <plat/dma.h>
#include <mach/irqs.h>

#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define FLAGS_FINUP		0x0002
#define FLAGS_FINAL		0x0004
#define FLAGS_SG		0x0008
#define FLAGS_SHA1		0x0010
#define FLAGS_DMA_ACTIVE	0x0020
#define FLAGS_OUTPUT_READY	0x0040
#define FLAGS_INIT		0x0100
#define FLAGS_CPU		0x0200
#define FLAGS_HMAC		0x0400
#define FLAGS_ERROR		0x0800
#define FLAGS_BUSY		0x1000

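/*
 * Note (derived from the code below, not part of the original file): the
 * same FLAGS_* namespace serves two owners. FLAGS_FINUP, FLAGS_FINAL,
 * FLAGS_SG, FLAGS_SHA1, FLAGS_OUTPUT_READY, FLAGS_CPU, FLAGS_HMAC and
 * FLAGS_ERROR live in the per-request context (omap_sham_reqctx.flags,
 * with FLAGS_HMAC also mirrored in the tfm context), while FLAGS_INIT,
 * FLAGS_DMA_ACTIVE and FLAGS_BUSY track device state (omap_sham_dev.flags).
 */
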
#define OP_UPDATE	1
#define OP_FINAL	2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN		PAGE_SIZE

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	spinlock_t		lock;
	int			err;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* MD5 is rarely used; always copy a SHA1-sized digest to keep the code simple */
	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(ctx->dd,
						SHA_REG_DIGEST(i));
		else
			omap_sham_write(ctx->dd,
					SHA_REG_DIGEST(i), hash[i]);
	}

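/*
 * Summary (derived from the code, not part of the original file):
 * omap_sham_copy_hash() moves the intermediate digest between the request
 * context and the DIGEST registers. out=1 saves the hardware state once a
 * chunk completes; out=0 restores it when a partially hashed request gets
 * the device back (see omap_sham_handle_queue()).
 */
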
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i;

	if (!hash)
		return;

	if (likely(ctx->flags & FLAGS_SHA1)) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = be32_to_cpu(in[i]);
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = le32_to_cpu(in[i]);
	}
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	clk_enable(dd->iclk);

	if (!(dd->flags & FLAGS_INIT)) {
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
					SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & FLAGS_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

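/*
 * Illustrative example (not from the original source): a single 64-byte
 * SHA1 block hashed in one shot would be programmed above as
 *
 *	CTRL = (64 << 5) | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_ALGO_CONST |
 *	       SHA_REG_CTRL_CLOSE_HASH;
 *
 * i.e. the byte count in the LENGTH field, SHA1 selected, the initial
 * constants loaded (first chunk) and the hash closed (last chunk).
 */
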
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zu, length: %zu, final: %d\n",
						ctx->digcnt, length, final);

	omap_sham_write_ctrl(dd, length, final, 0);

	/* digcnt must be updated before the transfer below so clocks can be disabled later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %zu, length: %zu, final: %d\n",
						ctx->digcnt, length, final);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
			OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	omap_sham_write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= FLAGS_DMA_ACTIVE;

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
					struct omap_sham_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
}

static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: %zu, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))

static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %zu, bufcnt: %zu, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & FLAGS_FINUP)) {
			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= FLAGS_SG;

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
}

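/*
 * Summary of the DMA fast path above (derived from the code, not part of
 * the original file): a scatterlist entry is fed to the hardware directly
 * only when its start is 32-bit aligned and every entry except the last
 * is a multiple of the block size; otherwise the data is bounced through
 * the driver's buffer in omap_sham_update_dma_slow(). Without finup(),
 * at least one full block is held back so the hash can still be closed.
 */
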
static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}

static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= FLAGS_SHA1;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= FLAGS_HMAC;
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zu, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);

	if (ctx->flags & FLAGS_CPU)
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for DMA completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %zu\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if (ctx->flags & FLAGS_HMAC)
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %zu, bufcnt: %zu\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(ctx->dd->req, 1);
		if (ctx->flags & FLAGS_FINAL)
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= FLAGS_ERROR;
	}

	clk_disable(dd->iclk);
	dd->flags &= ~FLAGS_BUSY;

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	struct ahash_request *prev_req;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);

	prev_req = dd->req;
	dd->req = req;

	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		tasklet_schedule(&dd->queue_task);
	}

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

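/*
 * Request flow through the queue above (a summary, not in the original
 * file): requests are pushed onto a single crypto_queue and at most one
 * is in flight at a time, serialized by FLAGS_BUSY under dd->lock.
 * Requests are finished either here (synchronous errors) or from the
 * done/queue tasklets, which re-enter omap_sham_handle_queue() with
 * req == NULL to pick up the next queued entry.
 */
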
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & FLAGS_FINUP) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with data lengths >= 9;
			 * switch to the software fallback in final(),
			 * which sees the same request and data.
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			 * faster to use CPU for short transfers
			 */
			ctx->flags |= FLAGS_CPU;
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}

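/*
 * Dispatch policy of omap_sham_update() in short (derived from the code
 * above): totals below 9 bytes are only buffered, since the hardware
 * cannot hash them, and final() falls back to software; transfers that
 * fit in one block go through the CPU path; anything that still fits in
 * the bounce buffer is accumulated; everything else is enqueued for DMA.
 */
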
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= FLAGS_FINUP;

	if (ctx->flags & FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= FLAGS_FINUP;

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() always has to be called to clean up resources
	 * even if update() failed, except in the -EINPROGRESS case
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return err;
}

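/*
 * Key schedule note (standard HMAC per RFC 2104, summarized here for
 * context): HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). setkey()
 * above prepares the padded, XORed ipad/opad blocks; omap_sham_init()
 * prepends ipad to the data stream so the hardware computes the inner
 * hash, and omap_sham_finish_hmac() folds in opad via the software
 * shash to produce the outer hash.
 */
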
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

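/*
 * Example usage from a kernel client (an illustrative sketch, not part of
 * this driver; error handling trimmed). Once the algorithms above are
 * registered, "sha1" may resolve to this driver's "omap-sha1":
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   complete_cb, &done);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	if (crypto_ahash_digest(req) == -EINPROGRESS)
 *		wait_for_completion(&done);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * where complete_cb() (a hypothetical name) is a caller-supplied crypto
 * completion callback that calls complete(&done).
 */
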
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int ready = 0, err = 0;

	if (ctx->flags & FLAGS_OUTPUT_READY) {
		ctx->flags &= ~FLAGS_OUTPUT_READY;
		ready = 1;
	}

	if (dd->flags & FLAGS_DMA_ACTIVE) {
		dd->flags &= ~FLAGS_DMA_ACTIVE;
		omap_sham_update_dma_stop(dd);
		if (!dd->err)
			err = omap_sham_update_dma_start(dd);
	}

	err = dd->err ? : err;

	if (err != -EINPROGRESS && (ready || err)) {
		dev_dbg(dd->dev, "update done: err: %d\n", err);
		/* finish current request */
		omap_sham_finish_req(req, err);
		/* start new request */
		omap_sham_handle_queue(dd, NULL);
	}
}

static void omap_sham_queue_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

	omap_sham_handle_queue(dd, NULL);
}

static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (!ctx) {
		dev_err(dd->dev, "unknown interrupt.\n");
		return IRQ_HANDLED;
	}

	if (unlikely(ctx->flags & FLAGS_FINAL))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
			     SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	ctx->flags |= FLAGS_OUTPUT_READY;
	dd->err = 0;
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		dd->err = -EIO;
		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
	}

	tasklet_schedule(&dd->done_task);
}

static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}

	return 0;
}

static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}

static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		(omap_type() != OMAP2_DEVICE_TYPE_SEC &&
			omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");