GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/atmel-aes.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Cryptographic API.
4
*
5
* Support for ATMEL AES HW acceleration.
6
*
7
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
8
* Author: Nicolas Royer <[email protected]>
9
*
10
* Some ideas are from omap-aes.c driver.
11
*/
12
13
14
#include <linux/kernel.h>
15
#include <linux/module.h>
16
#include <linux/slab.h>
17
#include <linux/err.h>
18
#include <linux/clk.h>
19
#include <linux/io.h>
20
#include <linux/hw_random.h>
21
#include <linux/platform_device.h>
22
23
#include <linux/device.h>
24
#include <linux/dmaengine.h>
25
#include <linux/init.h>
26
#include <linux/errno.h>
27
#include <linux/interrupt.h>
28
#include <linux/irq.h>
29
#include <linux/scatterlist.h>
30
#include <linux/dma-mapping.h>
31
#include <linux/mod_devicetable.h>
32
#include <linux/delay.h>
33
#include <linux/crypto.h>
34
#include <crypto/scatterwalk.h>
35
#include <crypto/algapi.h>
36
#include <crypto/aes.h>
37
#include <crypto/gcm.h>
38
#include <crypto/xts.h>
39
#include <crypto/internal/aead.h>
40
#include <crypto/internal/skcipher.h>
41
#include "atmel-aes-regs.h"
42
#include "atmel-authenc.h"
43
44
#define ATMEL_AES_PRIORITY 300
45
46
#define ATMEL_AES_BUFFER_ORDER 2
47
#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
48
49
#define SIZE_IN_WORDS(x) ((x) >> 2)
50
51
/* AES flags */
52
/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
53
#define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC
54
#define AES_FLAGS_GTAGEN AES_MR_GTAGEN
55
#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
56
#define AES_FLAGS_ECB AES_MR_OPMOD_ECB
57
#define AES_FLAGS_CBC AES_MR_OPMOD_CBC
58
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
59
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
60
#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
61
62
#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
63
AES_FLAGS_ENCRYPT | \
64
AES_FLAGS_GTAGEN)
65
66
#define AES_FLAGS_BUSY BIT(3)
67
#define AES_FLAGS_DUMP_REG BIT(4)
68
#define AES_FLAGS_OWN_SHA BIT(5)
69
70
#define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY
71
72
#define ATMEL_AES_QUEUE_LENGTH 50
73
74
#define ATMEL_AES_DMA_THRESHOLD 256
75
76
77
struct atmel_aes_caps {
78
bool has_dualbuff;
79
bool has_gcm;
80
bool has_xts;
81
bool has_authenc;
82
u32 max_burst_size;
83
};
84
85
struct atmel_aes_dev;
86
87
88
typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
89
90
91
struct atmel_aes_base_ctx {
92
struct atmel_aes_dev *dd;
93
atmel_aes_fn_t start;
94
int keylen;
95
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
96
u16 block_size;
97
bool is_aead;
98
};
99
100
struct atmel_aes_ctx {
101
struct atmel_aes_base_ctx base;
102
};
103
104
struct atmel_aes_ctr_ctx {
105
struct atmel_aes_base_ctx base;
106
107
__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
108
size_t offset;
109
struct scatterlist src[2];
110
struct scatterlist dst[2];
111
u32 blocks;
112
};
113
114
struct atmel_aes_gcm_ctx {
115
struct atmel_aes_base_ctx base;
116
117
struct scatterlist src[2];
118
struct scatterlist dst[2];
119
120
__be32 j0[AES_BLOCK_SIZE / sizeof(u32)];
121
u32 tag[AES_BLOCK_SIZE / sizeof(u32)];
122
__be32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
123
size_t textlen;
124
125
const __be32 *ghash_in;
126
__be32 *ghash_out;
127
atmel_aes_fn_t ghash_resume;
128
};
129
130
struct atmel_aes_xts_ctx {
131
struct atmel_aes_base_ctx base;
132
133
u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
134
struct crypto_skcipher *fallback_tfm;
135
};
136
137
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
138
struct atmel_aes_authenc_ctx {
139
struct atmel_aes_base_ctx base;
140
struct atmel_sha_authenc_ctx *auth;
141
};
142
#endif
143
144
struct atmel_aes_reqctx {
145
unsigned long mode;
146
u8 lastc[AES_BLOCK_SIZE];
147
struct skcipher_request fallback_req;
148
};
149
150
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
151
struct atmel_aes_authenc_reqctx {
152
struct atmel_aes_reqctx base;
153
154
struct scatterlist src[2];
155
struct scatterlist dst[2];
156
size_t textlen;
157
u32 digest[SHA512_DIGEST_SIZE / sizeof(u32)];
158
159
/* auth_req MUST be placed last. */
160
struct ahash_request auth_req;
161
};
162
#endif
163
164
struct atmel_aes_dma {
165
struct dma_chan *chan;
166
struct scatterlist *sg;
167
int nents;
168
unsigned int remainder;
169
unsigned int sg_len;
170
};
171
172
struct atmel_aes_dev {
173
struct list_head list;
174
unsigned long phys_base;
175
void __iomem *io_base;
176
177
struct crypto_async_request *areq;
178
struct atmel_aes_base_ctx *ctx;
179
180
bool is_async;
181
atmel_aes_fn_t resume;
182
atmel_aes_fn_t cpu_transfer_complete;
183
184
struct device *dev;
185
struct clk *iclk;
186
int irq;
187
188
unsigned long flags;
189
190
spinlock_t lock;
191
struct crypto_queue queue;
192
193
struct tasklet_struct done_task;
194
struct tasklet_struct queue_task;
195
196
size_t total;
197
size_t datalen;
198
u32 *data;
199
200
struct atmel_aes_dma src;
201
struct atmel_aes_dma dst;
202
203
size_t buflen;
204
void *buf;
205
struct scatterlist aligned_sg;
206
struct scatterlist *real_dst;
207
208
struct atmel_aes_caps caps;
209
210
u32 hw_version;
211
};
212
213
struct atmel_aes_drv {
214
struct list_head dev_list;
215
spinlock_t lock;
216
};
217
218
static struct atmel_aes_drv atmel_aes = {
219
.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
220
.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
221
};
222
223
#ifdef VERBOSE_DEBUG
224
static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
225
{
226
switch (offset) {
227
case AES_CR:
228
return "CR";
229
230
case AES_MR:
231
return "MR";
232
233
case AES_ISR:
234
return "ISR";
235
236
case AES_IMR:
237
return "IMR";
238
239
case AES_IER:
240
return "IER";
241
242
case AES_IDR:
243
return "IDR";
244
245
case AES_KEYWR(0):
246
case AES_KEYWR(1):
247
case AES_KEYWR(2):
248
case AES_KEYWR(3):
249
case AES_KEYWR(4):
250
case AES_KEYWR(5):
251
case AES_KEYWR(6):
252
case AES_KEYWR(7):
253
snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
254
break;
255
256
case AES_IDATAR(0):
257
case AES_IDATAR(1):
258
case AES_IDATAR(2):
259
case AES_IDATAR(3):
260
snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
261
break;
262
263
case AES_ODATAR(0):
264
case AES_ODATAR(1):
265
case AES_ODATAR(2):
266
case AES_ODATAR(3):
267
snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
268
break;
269
270
case AES_IVR(0):
271
case AES_IVR(1):
272
case AES_IVR(2):
273
case AES_IVR(3):
274
snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
275
break;
276
277
case AES_AADLENR:
278
return "AADLENR";
279
280
case AES_CLENR:
281
return "CLENR";
282
283
case AES_GHASHR(0):
284
case AES_GHASHR(1):
285
case AES_GHASHR(2):
286
case AES_GHASHR(3):
287
snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
288
break;
289
290
case AES_TAGR(0):
291
case AES_TAGR(1):
292
case AES_TAGR(2):
293
case AES_TAGR(3):
294
snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
295
break;
296
297
case AES_CTRR:
298
return "CTRR";
299
300
case AES_GCMHR(0):
301
case AES_GCMHR(1):
302
case AES_GCMHR(2):
303
case AES_GCMHR(3):
304
snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
305
break;
306
307
case AES_EMR:
308
return "EMR";
309
310
case AES_TWR(0):
311
case AES_TWR(1):
312
case AES_TWR(2):
313
case AES_TWR(3):
314
snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
315
break;
316
317
case AES_ALPHAR(0):
318
case AES_ALPHAR(1):
319
case AES_ALPHAR(2):
320
case AES_ALPHAR(3):
321
snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
322
break;
323
324
default:
325
snprintf(tmp, sz, "0x%02x", offset);
326
break;
327
}
328
329
return tmp;
330
}
331
#endif /* VERBOSE_DEBUG */
332
333
/* Shared functions */
334
335
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
336
{
337
u32 value = readl_relaxed(dd->io_base + offset);
338
339
#ifdef VERBOSE_DEBUG
340
if (dd->flags & AES_FLAGS_DUMP_REG) {
341
char tmp[16];
342
343
dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
344
atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
345
}
346
#endif /* VERBOSE_DEBUG */
347
348
return value;
349
}
350
351
static inline void atmel_aes_write(struct atmel_aes_dev *dd,
352
u32 offset, u32 value)
353
{
354
#ifdef VERBOSE_DEBUG
355
if (dd->flags & AES_FLAGS_DUMP_REG) {
356
char tmp[16];
357
358
dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
359
atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
360
}
361
#endif /* VERBOSE_DEBUG */
362
363
writel_relaxed(value, dd->io_base + offset);
364
}
365
366
static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
367
u32 *value, int count)
368
{
369
for (; count--; value++, offset += 4)
370
*value = atmel_aes_read(dd, offset);
371
}
372
373
static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
374
const u32 *value, int count)
375
{
376
for (; count--; value++, offset += 4)
377
atmel_aes_write(dd, offset, *value);
378
}
379
380
static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
381
void *value)
382
{
383
atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
384
}
385
386
static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
387
const void *value)
388
{
389
atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
390
}
391
392
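/*
* Run 'resume' immediately if the DATRDY flag is already set; otherwise
* arm the Data Ready interrupt and return -EINPROGRESS so 'resume' runs
* once the interrupt fires.
*/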
static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
393
atmel_aes_fn_t resume)
394
{
395
u32 isr = atmel_aes_read(dd, AES_ISR);
396
397
if (unlikely(isr & AES_INT_DATARDY))
398
return resume(dd);
399
400
dd->resume = resume;
401
atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
402
return -EINPROGRESS;
403
}
404
405
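/* Number of padding bytes needed to round len up to a multiple of block_size (a power of two). */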
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
406
{
407
len &= block_size - 1;
408
return len ? block_size - len : 0;
409
}
410
411
static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
412
{
413
struct atmel_aes_dev *aes_dd;
414
415
spin_lock_bh(&atmel_aes.lock);
416
/* One AES IP per SoC. */
417
aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
418
struct atmel_aes_dev, list);
419
spin_unlock_bh(&atmel_aes.lock);
420
return aes_dd;
421
}
422
423
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
424
{
425
int err;
426
427
err = clk_enable(dd->iclk);
428
if (err)
429
return err;
430
431
atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
432
atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
433
434
return 0;
435
}
436
437
static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
438
{
439
return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
440
}
441
442
static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
443
{
444
int err;
445
446
err = atmel_aes_hw_init(dd);
447
if (err)
448
return err;
449
450
dd->hw_version = atmel_aes_get_version(dd);
451
452
dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
453
454
clk_disable(dd->iclk);
455
return 0;
456
}
457
458
static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
459
const struct atmel_aes_reqctx *rctx)
460
{
461
/* Clear all but persistent flags and set request flags. */
462
dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
463
}
464
465
static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
466
{
467
return (dd->flags & AES_FLAGS_ENCRYPT);
468
}
469
470
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
471
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
472
#endif
473
474
static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
475
{
476
struct skcipher_request *req = skcipher_request_cast(dd->areq);
477
struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
478
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
479
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
480
481
if (req->cryptlen < ivsize)
482
return;
483
484
if (rctx->mode & AES_FLAGS_ENCRYPT)
485
scatterwalk_map_and_copy(req->iv, req->dst,
486
req->cryptlen - ivsize, ivsize, 0);
487
else
488
memcpy(req->iv, rctx->lastc, ivsize);
489
}
490
491
static inline struct atmel_aes_ctr_ctx *
492
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
493
{
494
return container_of(ctx, struct atmel_aes_ctr_ctx, base);
495
}
496
497
static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
498
{
499
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
500
struct skcipher_request *req = skcipher_request_cast(dd->areq);
501
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
502
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
503
int i;
504
505
/*
* The CTR transfer works in fragments of at most 1 MByte of data
* because of the 16-bit CTR counter embedded in the IP. When we get
* here, ctx->blocks holds the number of blocks of the last fragment
* processed, so there is no need to explicitly cast it to u16.
*/
511
for (i = 0; i < ctx->blocks; i++)
512
crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
513
514
memcpy(req->iv, ctx->iv, ivsize);
515
}
516
517
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
518
{
519
struct skcipher_request *req = skcipher_request_cast(dd->areq);
520
struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
521
522
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
523
if (dd->ctx->is_aead)
524
atmel_aes_authenc_complete(dd, err);
525
#endif
526
527
clk_disable(dd->iclk);
528
dd->flags &= ~AES_FLAGS_BUSY;
529
530
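/*
* For skcipher modes other than ECB, propagate the next IV back to the
* request: the last ciphertext block for chaining modes, or the
* incremented counter for CTR.
*/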
if (!err && !dd->ctx->is_aead &&
531
(rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
532
if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
533
atmel_aes_set_iv_as_last_ciphertext_block(dd);
534
else
535
atmel_aes_ctr_update_req_iv(dd);
536
}
537
538
if (dd->is_async)
539
crypto_request_complete(dd->areq, err);
540
541
tasklet_schedule(&dd->queue_task);
542
543
return err;
544
}
545
546
static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
547
const __be32 *iv, const u32 *key, int keylen)
548
{
549
u32 valmr = 0;
550
551
/* MR register must be set before IV registers */
552
if (keylen == AES_KEYSIZE_128)
553
valmr |= AES_MR_KEYSIZE_128;
554
else if (keylen == AES_KEYSIZE_192)
555
valmr |= AES_MR_KEYSIZE_192;
556
else
557
valmr |= AES_MR_KEYSIZE_256;
558
559
valmr |= dd->flags & AES_FLAGS_MODE_MASK;
560
561
if (use_dma) {
562
valmr |= AES_MR_SMOD_IDATAR0;
563
if (dd->caps.has_dualbuff)
564
valmr |= AES_MR_DUALBUFF;
565
} else {
566
valmr |= AES_MR_SMOD_AUTO;
567
}
568
569
atmel_aes_write(dd, AES_MR, valmr);
570
571
atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
572
573
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
574
atmel_aes_write_block(dd, AES_IVR(0), iv);
575
}
576
577
static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
578
const __be32 *iv)
579
580
{
581
atmel_aes_write_ctrl_key(dd, use_dma, iv,
582
dd->ctx->key, dd->ctx->keylen);
583
}
584
585
/* CPU transfer */
586
587
static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
588
{
589
int err = 0;
590
u32 isr;
591
592
for (;;) {
593
atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
594
dd->data += 4;
595
dd->datalen -= AES_BLOCK_SIZE;
596
597
if (dd->datalen < AES_BLOCK_SIZE)
598
break;
599
600
atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
601
602
isr = atmel_aes_read(dd, AES_ISR);
603
if (!(isr & AES_INT_DATARDY)) {
604
dd->resume = atmel_aes_cpu_transfer;
605
atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
606
return -EINPROGRESS;
607
}
608
}
609
610
if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
611
dd->buf, dd->total))
612
err = -EINVAL;
613
614
if (err)
615
return atmel_aes_complete(dd, err);
616
617
return dd->cpu_transfer_complete(dd);
618
}
619
620
static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
621
struct scatterlist *src,
622
struct scatterlist *dst,
623
size_t len,
624
atmel_aes_fn_t resume)
625
{
626
size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
627
628
if (unlikely(len == 0))
629
return -EINVAL;
630
631
sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
632
633
dd->total = len;
634
dd->real_dst = dst;
635
dd->cpu_transfer_complete = resume;
636
dd->datalen = len + padlen;
637
dd->data = (u32 *)dd->buf;
638
atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
639
return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
640
}
641
642
643
/* DMA transfer */
644
645
static void atmel_aes_dma_callback(void *data);
646
647
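/*
* Check whether a scatterlist can be handed to the DMA engine as-is:
* the total length must be block aligned, every entry must start on a
* 32-bit boundary and all but the last entry must have a block-aligned
* length. On success, trim the last entry to 'len' and record the
* trimmed remainder so atmel_aes_restore_sg() can undo it later.
*/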
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
648
struct scatterlist *sg,
649
size_t len,
650
struct atmel_aes_dma *dma)
651
{
652
int nents;
653
654
if (!IS_ALIGNED(len, dd->ctx->block_size))
655
return false;
656
657
for (nents = 0; sg; sg = sg_next(sg), ++nents) {
658
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
659
return false;
660
661
if (len <= sg->length) {
662
if (!IS_ALIGNED(len, dd->ctx->block_size))
663
return false;
664
665
dma->nents = nents+1;
666
dma->remainder = sg->length - len;
667
sg->length = len;
668
return true;
669
}
670
671
if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
672
return false;
673
674
len -= sg->length;
675
}
676
677
return false;
678
}
679
680
static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
681
{
682
struct scatterlist *sg = dma->sg;
683
int nents = dma->nents;
684
685
if (!dma->remainder)
686
return;
687
688
while (--nents > 0 && sg)
689
sg = sg_next(sg);
690
691
if (!sg)
692
return;
693
694
sg->length += dma->remainder;
695
}
696
697
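/*
* DMA-map the source and destination scatterlists. If either list is
* not suitably aligned, the data is bounced through dd->buf via a
* single-entry scatterlist instead.
*/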
static int atmel_aes_map(struct atmel_aes_dev *dd,
698
struct scatterlist *src,
699
struct scatterlist *dst,
700
size_t len)
701
{
702
bool src_aligned, dst_aligned;
703
size_t padlen;
704
705
dd->total = len;
706
dd->src.sg = src;
707
dd->dst.sg = dst;
708
dd->real_dst = dst;
709
710
src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
711
if (src == dst)
712
dst_aligned = src_aligned;
713
else
714
dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
715
if (!src_aligned || !dst_aligned) {
716
padlen = atmel_aes_padlen(len, dd->ctx->block_size);
717
718
if (dd->buflen < len + padlen)
719
return -ENOMEM;
720
721
if (!src_aligned) {
722
sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
723
dd->src.sg = &dd->aligned_sg;
724
dd->src.nents = 1;
725
dd->src.remainder = 0;
726
}
727
728
if (!dst_aligned) {
729
dd->dst.sg = &dd->aligned_sg;
730
dd->dst.nents = 1;
731
dd->dst.remainder = 0;
732
}
733
734
sg_init_table(&dd->aligned_sg, 1);
735
sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
736
}
737
738
if (dd->src.sg == dd->dst.sg) {
739
dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
740
DMA_BIDIRECTIONAL);
741
dd->dst.sg_len = dd->src.sg_len;
742
if (!dd->src.sg_len)
743
return -EFAULT;
744
} else {
745
dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
746
DMA_TO_DEVICE);
747
if (!dd->src.sg_len)
748
return -EFAULT;
749
750
dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
751
DMA_FROM_DEVICE);
752
if (!dd->dst.sg_len) {
753
dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
754
DMA_TO_DEVICE);
755
return -EFAULT;
756
}
757
}
758
759
return 0;
760
}
761
762
static void atmel_aes_unmap(struct atmel_aes_dev *dd)
763
{
764
if (dd->src.sg == dd->dst.sg) {
765
dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
766
DMA_BIDIRECTIONAL);
767
768
if (dd->src.sg != &dd->aligned_sg)
769
atmel_aes_restore_sg(&dd->src);
770
} else {
771
dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
772
DMA_FROM_DEVICE);
773
774
if (dd->dst.sg != &dd->aligned_sg)
775
atmel_aes_restore_sg(&dd->dst);
776
777
dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
778
DMA_TO_DEVICE);
779
780
if (dd->src.sg != &dd->aligned_sg)
781
atmel_aes_restore_sg(&dd->src);
782
}
783
784
if (dd->dst.sg == &dd->aligned_sg)
785
sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
786
dd->buf, dd->total);
787
}
788
789
static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
790
enum dma_slave_buswidth addr_width,
791
enum dma_transfer_direction dir,
792
u32 maxburst)
793
{
794
struct dma_async_tx_descriptor *desc;
795
struct dma_slave_config config;
796
dma_async_tx_callback callback;
797
struct atmel_aes_dma *dma;
798
int err;
799
800
memset(&config, 0, sizeof(config));
801
config.src_addr_width = addr_width;
802
config.dst_addr_width = addr_width;
803
config.src_maxburst = maxburst;
804
config.dst_maxburst = maxburst;
805
806
switch (dir) {
807
case DMA_MEM_TO_DEV:
808
dma = &dd->src;
809
callback = NULL;
810
config.dst_addr = dd->phys_base + AES_IDATAR(0);
811
break;
812
813
case DMA_DEV_TO_MEM:
814
dma = &dd->dst;
815
callback = atmel_aes_dma_callback;
816
config.src_addr = dd->phys_base + AES_ODATAR(0);
817
break;
818
819
default:
820
return -EINVAL;
821
}
822
823
err = dmaengine_slave_config(dma->chan, &config);
824
if (err)
825
return err;
826
827
desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
828
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
829
if (!desc)
830
return -ENOMEM;
831
832
desc->callback = callback;
833
desc->callback_param = dd;
834
dmaengine_submit(desc);
835
dma_async_issue_pending(dma->chan);
836
837
return 0;
838
}
839
840
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
841
struct scatterlist *src,
842
struct scatterlist *dst,
843
size_t len,
844
atmel_aes_fn_t resume)
845
{
846
enum dma_slave_buswidth addr_width;
847
u32 maxburst;
848
int err;
849
850
switch (dd->ctx->block_size) {
851
case AES_BLOCK_SIZE:
852
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
853
maxburst = dd->caps.max_burst_size;
854
break;
855
856
default:
857
err = -EINVAL;
858
goto exit;
859
}
860
861
err = atmel_aes_map(dd, src, dst, len);
862
if (err)
863
goto exit;
864
865
dd->resume = resume;
866
867
/* Set output DMA transfer first */
868
err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
869
maxburst);
870
if (err)
871
goto unmap;
872
873
/* Then set input DMA transfer */
874
err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
875
maxburst);
876
if (err)
877
goto output_transfer_stop;
878
879
return -EINPROGRESS;
880
881
output_transfer_stop:
882
dmaengine_terminate_sync(dd->dst.chan);
883
unmap:
884
atmel_aes_unmap(dd);
885
exit:
886
return atmel_aes_complete(dd, err);
887
}
888
889
static void atmel_aes_dma_callback(void *data)
890
{
891
struct atmel_aes_dev *dd = data;
892
893
atmel_aes_unmap(dd);
894
dd->is_async = true;
895
(void)dd->resume(dd);
896
}
897
898
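/*
* Enqueue 'new_areq' (if any) and, unless the engine is already busy,
* dequeue the next request and start it through its ctx->start() hook.
*/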
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
899
struct crypto_async_request *new_areq)
900
{
901
struct crypto_async_request *areq, *backlog;
902
struct atmel_aes_base_ctx *ctx;
903
unsigned long flags;
904
bool start_async;
905
int err, ret = 0;
906
907
spin_lock_irqsave(&dd->lock, flags);
908
if (new_areq)
909
ret = crypto_enqueue_request(&dd->queue, new_areq);
910
if (dd->flags & AES_FLAGS_BUSY) {
911
spin_unlock_irqrestore(&dd->lock, flags);
912
return ret;
913
}
914
backlog = crypto_get_backlog(&dd->queue);
915
areq = crypto_dequeue_request(&dd->queue);
916
if (areq)
917
dd->flags |= AES_FLAGS_BUSY;
918
spin_unlock_irqrestore(&dd->lock, flags);
919
920
if (!areq)
921
return ret;
922
923
if (backlog)
924
crypto_request_complete(backlog, -EINPROGRESS);
925
926
ctx = crypto_tfm_ctx(areq->tfm);
927
928
dd->areq = areq;
929
dd->ctx = ctx;
930
start_async = (areq != new_areq);
931
dd->is_async = start_async;
932
933
/* WARNING: ctx->start() MAY change dd->is_async. */
934
err = ctx->start(dd);
935
return (start_async) ? ret : err;
936
}
937
938
939
/* AES async block ciphers */
940
941
static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
942
{
943
return atmel_aes_complete(dd, 0);
944
}
945
946
static int atmel_aes_start(struct atmel_aes_dev *dd)
947
{
948
struct skcipher_request *req = skcipher_request_cast(dd->areq);
949
struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
950
bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
951
dd->ctx->block_size != AES_BLOCK_SIZE);
952
int err;
953
954
atmel_aes_set_mode(dd, rctx);
955
956
err = atmel_aes_hw_init(dd);
957
if (err)
958
return atmel_aes_complete(dd, err);
959
960
atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
961
if (use_dma)
962
return atmel_aes_dma_start(dd, req->src, req->dst,
963
req->cryptlen,
964
atmel_aes_transfer_complete);
965
966
return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
967
atmel_aes_transfer_complete);
968
}
969
970
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
971
{
972
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
973
struct skcipher_request *req = skcipher_request_cast(dd->areq);
974
struct scatterlist *src, *dst;
975
size_t datalen;
976
u32 ctr;
977
u16 start, end;
978
bool use_dma, fragmented = false;
979
980
/* Check for transfer completion. */
981
ctx->offset += dd->total;
982
if (ctx->offset >= req->cryptlen)
983
return atmel_aes_transfer_complete(dd);
984
985
/* Compute data length. */
986
datalen = req->cryptlen - ctx->offset;
987
ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
988
ctr = be32_to_cpu(ctx->iv[3]);
989
990
/* Check for 16-bit counter overflow. */
991
start = ctr & 0xffff;
992
end = start + ctx->blocks - 1;
993
994
if (ctx->blocks >> 16 || end < start) {
995
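/*
* Clamp this fragment so it ends where the 16-bit counter wraps; the
* remaining data is processed on the next pass with a manually
* incremented IV.
*/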
ctr |= 0xffff;
996
datalen = AES_BLOCK_SIZE * (0x10000 - start);
997
fragmented = true;
998
}
999
1000
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
1001
1002
/* Jump to offset. */
1003
src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
1004
dst = ((req->src == req->dst) ? src :
1005
scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
1006
1007
/* Configure hardware. */
1008
atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
1009
if (unlikely(fragmented)) {
1010
/*
1011
* Increment the counter manually to cope with the hardware
1012
* counter overflow.
1013
*/
1014
ctx->iv[3] = cpu_to_be32(ctr);
1015
crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
1016
}
1017
1018
if (use_dma)
1019
return atmel_aes_dma_start(dd, src, dst, datalen,
1020
atmel_aes_ctr_transfer);
1021
1022
return atmel_aes_cpu_start(dd, src, dst, datalen,
1023
atmel_aes_ctr_transfer);
1024
}
1025
1026
static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
1027
{
1028
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
1029
struct skcipher_request *req = skcipher_request_cast(dd->areq);
1030
struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1031
int err;
1032
1033
atmel_aes_set_mode(dd, rctx);
1034
1035
err = atmel_aes_hw_init(dd);
1036
if (err)
1037
return atmel_aes_complete(dd, err);
1038
1039
memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
1040
ctx->offset = 0;
1041
dd->total = 0;
1042
return atmel_aes_ctr_transfer(dd);
1043
}
1044
1045
static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
1046
{
1047
struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1048
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
1049
crypto_skcipher_reqtfm(req));
1050
1051
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
1052
skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
1053
req->base.complete, req->base.data);
1054
skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
1055
req->cryptlen, req->iv);
1056
1057
return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
1058
crypto_skcipher_decrypt(&rctx->fallback_req);
1059
}
1060
1061
static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
1062
{
1063
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1064
struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
1065
struct atmel_aes_reqctx *rctx;
1066
u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
1067
1068
if (opmode == AES_FLAGS_XTS) {
1069
if (req->cryptlen < XTS_BLOCK_SIZE)
1070
return -EINVAL;
1071
1072
if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
1073
return atmel_aes_xts_fallback(req,
1074
mode & AES_FLAGS_ENCRYPT);
1075
}
1076
1077
/*
* ECB, CBC and CTR modes require the plaintext and ciphertext
* to have a positive integer length.
*/
1081
if (!req->cryptlen && opmode != AES_FLAGS_XTS)
1082
return 0;
1083
1084
if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
1085
!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
1086
return -EINVAL;
1087
1088
ctx->block_size = AES_BLOCK_SIZE;
1089
ctx->is_aead = false;
1090
1091
rctx = skcipher_request_ctx(req);
1092
rctx->mode = mode;
1093
1094
if (opmode != AES_FLAGS_ECB &&
1095
!(mode & AES_FLAGS_ENCRYPT)) {
1096
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1097
1098
if (req->cryptlen >= ivsize)
1099
scatterwalk_map_and_copy(rctx->lastc, req->src,
1100
req->cryptlen - ivsize,
1101
ivsize, 0);
1102
}
1103
1104
return atmel_aes_handle_queue(ctx->dd, &req->base);
1105
}
1106
1107
static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
1108
unsigned int keylen)
1109
{
1110
struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
1111
1112
if (keylen != AES_KEYSIZE_128 &&
1113
keylen != AES_KEYSIZE_192 &&
1114
keylen != AES_KEYSIZE_256)
1115
return -EINVAL;
1116
1117
memcpy(ctx->key, key, keylen);
1118
ctx->keylen = keylen;
1119
1120
return 0;
1121
}
1122
1123
static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
1124
{
1125
return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1126
}
1127
1128
static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
1129
{
1130
return atmel_aes_crypt(req, AES_FLAGS_ECB);
1131
}
1132
1133
static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
1134
{
1135
return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1136
}
1137
1138
static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
1139
{
1140
return atmel_aes_crypt(req, AES_FLAGS_CBC);
1141
}
1142
1143
static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
1144
{
1145
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1146
}
1147
1148
static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
1149
{
1150
return atmel_aes_crypt(req, AES_FLAGS_CTR);
1151
}
1152
1153
static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
1154
{
1155
struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
1156
struct atmel_aes_dev *dd;
1157
1158
dd = atmel_aes_dev_alloc(&ctx->base);
1159
if (!dd)
1160
return -ENODEV;
1161
1162
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1163
ctx->base.dd = dd;
1164
ctx->base.start = atmel_aes_start;
1165
1166
return 0;
1167
}
1168
1169
static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
1170
{
1171
struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
1172
struct atmel_aes_dev *dd;
1173
1174
dd = atmel_aes_dev_alloc(&ctx->base);
1175
if (!dd)
1176
return -ENODEV;
1177
1178
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1179
ctx->base.dd = dd;
1180
ctx->base.start = atmel_aes_ctr_start;
1181
1182
return 0;
1183
}
1184
1185
static struct skcipher_alg aes_algs[] = {
1186
{
1187
.base.cra_name = "ecb(aes)",
1188
.base.cra_driver_name = "atmel-ecb-aes",
1189
.base.cra_blocksize = AES_BLOCK_SIZE,
1190
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
1191
1192
.init = atmel_aes_init_tfm,
1193
.min_keysize = AES_MIN_KEY_SIZE,
1194
.max_keysize = AES_MAX_KEY_SIZE,
1195
.setkey = atmel_aes_setkey,
1196
.encrypt = atmel_aes_ecb_encrypt,
1197
.decrypt = atmel_aes_ecb_decrypt,
1198
},
1199
{
1200
.base.cra_name = "cbc(aes)",
1201
.base.cra_driver_name = "atmel-cbc-aes",
1202
.base.cra_blocksize = AES_BLOCK_SIZE,
1203
.base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
1204
1205
.init = atmel_aes_init_tfm,
1206
.min_keysize = AES_MIN_KEY_SIZE,
1207
.max_keysize = AES_MAX_KEY_SIZE,
1208
.setkey = atmel_aes_setkey,
1209
.encrypt = atmel_aes_cbc_encrypt,
1210
.decrypt = atmel_aes_cbc_decrypt,
1211
.ivsize = AES_BLOCK_SIZE,
1212
},
1213
{
1214
.base.cra_name = "ctr(aes)",
1215
.base.cra_driver_name = "atmel-ctr-aes",
1216
.base.cra_blocksize = 1,
1217
.base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
1218
1219
.init = atmel_aes_ctr_init_tfm,
1220
.min_keysize = AES_MIN_KEY_SIZE,
1221
.max_keysize = AES_MAX_KEY_SIZE,
1222
.setkey = atmel_aes_setkey,
1223
.encrypt = atmel_aes_ctr_encrypt,
1224
.decrypt = atmel_aes_ctr_decrypt,
1225
.ivsize = AES_BLOCK_SIZE,
1226
},
1227
};
1228
1229
1230
/* gcm aead functions */
1231
1232
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1233
const u32 *data, size_t datalen,
1234
const __be32 *ghash_in, __be32 *ghash_out,
1235
atmel_aes_fn_t resume);
1236
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1237
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1238
1239
static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1240
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1241
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1242
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1243
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1244
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1245
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1246
1247
static inline struct atmel_aes_gcm_ctx *
1248
atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1249
{
1250
return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1251
}
1252
1253
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1254
const u32 *data, size_t datalen,
1255
const __be32 *ghash_in, __be32 *ghash_out,
1256
atmel_aes_fn_t resume)
1257
{
1258
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1259
1260
dd->data = (u32 *)data;
1261
dd->datalen = datalen;
1262
ctx->ghash_in = ghash_in;
1263
ctx->ghash_out = ghash_out;
1264
ctx->ghash_resume = resume;
1265
1266
atmel_aes_write_ctrl(dd, false, NULL);
1267
return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1268
}
1269
1270
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1271
{
1272
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1273
1274
/* Set the data length. */
1275
atmel_aes_write(dd, AES_AADLENR, dd->total);
1276
atmel_aes_write(dd, AES_CLENR, 0);
1277
1278
/* If needed, overwrite the GCM Intermediate Hash Word Registers */
1279
if (ctx->ghash_in)
1280
atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1281
1282
return atmel_aes_gcm_ghash_finalize(dd);
1283
}
1284
1285
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1286
{
1287
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1288
u32 isr;
1289
1290
/* Write data into the Input Data Registers. */
1291
while (dd->datalen > 0) {
1292
atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1293
dd->data += 4;
1294
dd->datalen -= AES_BLOCK_SIZE;
1295
1296
isr = atmel_aes_read(dd, AES_ISR);
1297
if (!(isr & AES_INT_DATARDY)) {
1298
dd->resume = atmel_aes_gcm_ghash_finalize;
1299
atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1300
return -EINPROGRESS;
1301
}
1302
}
1303
1304
/* Read the computed hash from GHASHRx. */
1305
atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1306
1307
return ctx->ghash_resume(dd);
1308
}
1309
1310
1311
static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1312
{
1313
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1314
struct aead_request *req = aead_request_cast(dd->areq);
1315
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1316
struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1317
size_t ivsize = crypto_aead_ivsize(tfm);
1318
size_t datalen, padlen;
1319
const void *iv = req->iv;
1320
u8 *data = dd->buf;
1321
int err;
1322
1323
atmel_aes_set_mode(dd, rctx);
1324
1325
err = atmel_aes_hw_init(dd);
1326
if (err)
1327
return atmel_aes_complete(dd, err);
1328
1329
if (likely(ivsize == GCM_AES_IV_SIZE)) {
1330
memcpy(ctx->j0, iv, ivsize);
1331
ctx->j0[3] = cpu_to_be32(1);
1332
return atmel_aes_gcm_process(dd);
1333
}
1334
1335
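/*
* IV is not 96 bits wide: build J0 = GHASH(IV || 0^s || [len(IV)]64) as
* defined by the GCM specification, using dd->buf to hold the IV padded
* to a block boundary followed by its 64-bit bit length.
*/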
padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1336
datalen = ivsize + padlen + AES_BLOCK_SIZE;
1337
if (datalen > dd->buflen)
1338
return atmel_aes_complete(dd, -EINVAL);
1339
1340
memcpy(data, iv, ivsize);
1341
memset(data + ivsize, 0, padlen + sizeof(u64));
1342
((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1343
1344
return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1345
NULL, ctx->j0, atmel_aes_gcm_process);
1346
}
1347
1348
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1349
{
1350
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1351
struct aead_request *req = aead_request_cast(dd->areq);
1352
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1353
bool enc = atmel_aes_is_encrypt(dd);
1354
u32 authsize;
1355
1356
/* Compute text length. */
1357
authsize = crypto_aead_authsize(tfm);
1358
ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1359
1360
/*
* According to the tcrypt test suite, the GCM Automatic Tag Generation
* fails when both the message and its associated data are empty.
*/
1364
if (likely(req->assoclen != 0 || ctx->textlen != 0))
1365
dd->flags |= AES_FLAGS_GTAGEN;
1366
1367
atmel_aes_write_ctrl(dd, false, NULL);
1368
return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1369
}
1370
1371
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1372
{
1373
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1374
struct aead_request *req = aead_request_cast(dd->areq);
1375
__be32 j0_lsw, *j0 = ctx->j0;
1376
size_t padlen;
1377
1378
/* Write incr32(J0) into IV. */
1379
j0_lsw = j0[3];
1380
be32_add_cpu(&j0[3], 1);
1381
atmel_aes_write_block(dd, AES_IVR(0), j0);
1382
j0[3] = j0_lsw;
1383
1384
/* Set aad and text lengths. */
1385
atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1386
atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1387
1388
/* Check whether AAD are present. */
1389
if (unlikely(req->assoclen == 0)) {
1390
dd->datalen = 0;
1391
return atmel_aes_gcm_data(dd);
1392
}
1393
1394
/* Copy assoc data and add padding. */
1395
padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1396
if (unlikely(req->assoclen + padlen > dd->buflen))
1397
return atmel_aes_complete(dd, -EINVAL);
1398
sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1399
1400
/* Write assoc data into the Input Data register. */
1401
dd->data = (u32 *)dd->buf;
1402
dd->datalen = req->assoclen + padlen;
1403
return atmel_aes_gcm_data(dd);
1404
}
1405
1406
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
1407
{
1408
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1409
struct aead_request *req = aead_request_cast(dd->areq);
1410
bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
1411
struct scatterlist *src, *dst;
1412
u32 isr, mr;
1413
1414
/* Write AAD first. */
1415
while (dd->datalen > 0) {
1416
atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1417
dd->data += 4;
1418
dd->datalen -= AES_BLOCK_SIZE;
1419
1420
isr = atmel_aes_read(dd, AES_ISR);
1421
if (!(isr & AES_INT_DATARDY)) {
1422
dd->resume = atmel_aes_gcm_data;
1423
atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1424
return -EINPROGRESS;
1425
}
1426
}
1427
1428
/* GMAC only. */
1429
if (unlikely(ctx->textlen == 0))
1430
return atmel_aes_gcm_tag_init(dd);
1431
1432
/* Prepare src and dst scatter lists to transfer cipher/plain texts */
1433
src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
1434
dst = ((req->src == req->dst) ? src :
1435
scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
1436
1437
if (use_dma) {
1438
/* Update the Mode Register for DMA transfers. */
1439
mr = atmel_aes_read(dd, AES_MR);
1440
mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
1441
mr |= AES_MR_SMOD_IDATAR0;
1442
if (dd->caps.has_dualbuff)
1443
mr |= AES_MR_DUALBUFF;
1444
atmel_aes_write(dd, AES_MR, mr);
1445
1446
return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
1447
atmel_aes_gcm_tag_init);
1448
}
1449
1450
return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
1451
atmel_aes_gcm_tag_init);
1452
}
1453
1454
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
1455
{
1456
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1457
struct aead_request *req = aead_request_cast(dd->areq);
1458
__be64 *data = dd->buf;
1459
1460
if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
1461
if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
1462
dd->resume = atmel_aes_gcm_tag_init;
1463
atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
1464
return -EINPROGRESS;
1465
}
1466
1467
return atmel_aes_gcm_finalize(dd);
1468
}
1469
1470
/* Read the GCM Intermediate Hash Word Registers. */
1471
atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
1472
1473
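/* Finish GHASH over the 64-bit bit lengths of the AAD and of the text. */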
data[0] = cpu_to_be64(req->assoclen * 8);
1474
data[1] = cpu_to_be64(ctx->textlen * 8);
1475
1476
return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
1477
ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
1478
}
1479
1480
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
1481
{
1482
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1483
unsigned long flags;
1484
1485
/*
1486
* Change mode to CTR to complete the tag generation.
1487
* Use J0 as Initialization Vector.
1488
*/
1489
flags = dd->flags;
1490
dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
1491
dd->flags |= AES_FLAGS_CTR;
1492
atmel_aes_write_ctrl(dd, false, ctx->j0);
1493
dd->flags = flags;
1494
1495
atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
1496
return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
1497
}
1498
1499
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
1500
{
1501
struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1502
struct aead_request *req = aead_request_cast(dd->areq);
1503
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1504
bool enc = atmel_aes_is_encrypt(dd);
1505
u32 offset, authsize, itag[4], *otag = ctx->tag;
1506
int err;
1507
1508
/* Read the computed tag. */
1509
if (likely(dd->flags & AES_FLAGS_GTAGEN))
1510
atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
1511
else
1512
atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
1513
1514
offset = req->assoclen + ctx->textlen;
1515
authsize = crypto_aead_authsize(tfm);
1516
if (enc) {
1517
scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
1518
err = 0;
1519
} else {
1520
scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
1521
err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
1522
}
1523
1524
return atmel_aes_complete(dd, err);
1525
}
1526
1527
static int atmel_aes_gcm_crypt(struct aead_request *req,
1528
unsigned long mode)
1529
{
1530
struct atmel_aes_base_ctx *ctx;
1531
struct atmel_aes_reqctx *rctx;
1532
1533
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1534
ctx->block_size = AES_BLOCK_SIZE;
1535
ctx->is_aead = true;
1536
1537
rctx = aead_request_ctx(req);
1538
rctx->mode = AES_FLAGS_GCM | mode;
1539
1540
return atmel_aes_handle_queue(ctx->dd, &req->base);
1541
}
1542
1543
static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
1544
unsigned int keylen)
1545
{
1546
struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
1547
1548
if (keylen != AES_KEYSIZE_256 &&
1549
keylen != AES_KEYSIZE_192 &&
1550
keylen != AES_KEYSIZE_128)
1551
return -EINVAL;
1552
1553
memcpy(ctx->key, key, keylen);
1554
ctx->keylen = keylen;
1555
1556
return 0;
1557
}
1558
1559
static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
1560
unsigned int authsize)
1561
{
1562
return crypto_gcm_check_authsize(authsize);
1563
}
1564
1565
static int atmel_aes_gcm_encrypt(struct aead_request *req)
1566
{
1567
return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
1568
}
1569
1570
static int atmel_aes_gcm_decrypt(struct aead_request *req)
1571
{
1572
return atmel_aes_gcm_crypt(req, 0);
1573
}
1574
1575
static int atmel_aes_gcm_init(struct crypto_aead *tfm)
1576
{
1577
struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
1578
struct atmel_aes_dev *dd;
1579
1580
dd = atmel_aes_dev_alloc(&ctx->base);
1581
if (!dd)
1582
return -ENODEV;
1583
1584
crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
1585
ctx->base.dd = dd;
1586
ctx->base.start = atmel_aes_gcm_start;
1587
1588
return 0;
1589
}
1590
1591
static struct aead_alg aes_gcm_alg = {
1592
.setkey = atmel_aes_gcm_setkey,
1593
.setauthsize = atmel_aes_gcm_setauthsize,
1594
.encrypt = atmel_aes_gcm_encrypt,
1595
.decrypt = atmel_aes_gcm_decrypt,
1596
.init = atmel_aes_gcm_init,
1597
.ivsize = GCM_AES_IV_SIZE,
1598
.maxauthsize = AES_BLOCK_SIZE,
1599
1600
.base = {
1601
.cra_name = "gcm(aes)",
1602
.cra_driver_name = "atmel-gcm-aes",
1603
.cra_blocksize = 1,
1604
.cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx),
1605
},
1606
};
1607
1608
1609
/* xts functions */
1610
1611
static inline struct atmel_aes_xts_ctx *
1612
atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
1613
{
1614
return container_of(ctx, struct atmel_aes_xts_ctx, base);
1615
}
1616
1617
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
1618
1619
static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
1620
{
1621
struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
1622
struct skcipher_request *req = skcipher_request_cast(dd->areq);
1623
struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
1624
unsigned long flags;
1625
int err;
1626
1627
atmel_aes_set_mode(dd, rctx);
1628
1629
err = atmel_aes_hw_init(dd);
1630
if (err)
1631
return atmel_aes_complete(dd, err);
1632
1633
/* Compute the tweak value from req->iv with ecb(aes). */
1634
flags = dd->flags;
1635
dd->flags &= ~AES_FLAGS_MODE_MASK;
1636
dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1637
atmel_aes_write_ctrl_key(dd, false, NULL,
1638
ctx->key2, ctx->base.keylen);
1639
dd->flags = flags;
1640
1641
atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
1642
return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
1643
}
1644
1645
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
1646
{
1647
struct skcipher_request *req = skcipher_request_cast(dd->areq);
1648
bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
1649
u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
1650
static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
1651
u8 *tweak_bytes = (u8 *)tweak;
1652
int i;
1653
1654
/* Read the computed ciphered tweak value. */
1655
atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
1656
/*
* Hardware quirk:
* the order of the ciphered tweak bytes needs to be reversed before
* writing them into the TWRx registers.
*/
1661
for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
1662
swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
1663
1664
/* Process the data. */
1665
atmel_aes_write_ctrl(dd, use_dma, NULL);
1666
atmel_aes_write_block(dd, AES_TWR(0), tweak);
1667
atmel_aes_write_block(dd, AES_ALPHAR(0), one);
1668
if (use_dma)
1669
return atmel_aes_dma_start(dd, req->src, req->dst,
1670
req->cryptlen,
1671
atmel_aes_transfer_complete);
1672
1673
return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
1674
atmel_aes_transfer_complete);
1675
}
1676
1677
static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
1678
unsigned int keylen)
1679
{
1680
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1681
int err;
1682
1683
err = xts_verify_key(tfm, key, keylen);
1684
if (err)
1685
return err;
1686
1687
crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
1688
crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
1689
CRYPTO_TFM_REQ_MASK);
1690
err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
1691
if (err)
1692
return err;
1693
1694
memcpy(ctx->base.key, key, keylen/2);
1695
memcpy(ctx->key2, key + keylen/2, keylen/2);
1696
ctx->base.keylen = keylen/2;
1697
1698
return 0;
1699
}
1700
1701
static int atmel_aes_xts_encrypt(struct skcipher_request *req)
1702
{
1703
return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
1704
}
1705
1706
static int atmel_aes_xts_decrypt(struct skcipher_request *req)
1707
{
1708
return atmel_aes_crypt(req, AES_FLAGS_XTS);
1709
}
1710
1711
static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
1712
{
1713
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1714
struct atmel_aes_dev *dd;
1715
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1716
1717
dd = atmel_aes_dev_alloc(&ctx->base);
1718
if (!dd)
1719
return -ENODEV;
1720
1721
ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
1722
CRYPTO_ALG_NEED_FALLBACK);
1723
if (IS_ERR(ctx->fallback_tfm))
1724
return PTR_ERR(ctx->fallback_tfm);
1725
1726
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
1727
crypto_skcipher_reqsize(ctx->fallback_tfm));
1728
ctx->base.dd = dd;
1729
ctx->base.start = atmel_aes_xts_start;
1730
1731
return 0;
1732
}
1733
1734
static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
1735
{
1736
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
1737
1738
crypto_free_skcipher(ctx->fallback_tfm);
1739
}
1740
1741
static struct skcipher_alg aes_xts_alg = {
1742
.base.cra_name = "xts(aes)",
1743
.base.cra_driver_name = "atmel-xts-aes",
1744
.base.cra_blocksize = AES_BLOCK_SIZE,
1745
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
1746
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1747
CRYPTO_ALG_KERN_DRIVER_ONLY,
1748
1749
.min_keysize = 2 * AES_MIN_KEY_SIZE,
1750
.max_keysize = 2 * AES_MAX_KEY_SIZE,
1751
.ivsize = AES_BLOCK_SIZE,
1752
.setkey = atmel_aes_xts_setkey,
1753
.encrypt = atmel_aes_xts_encrypt,
1754
.decrypt = atmel_aes_xts_decrypt,
1755
.init = atmel_aes_xts_init_tfm,
1756
.exit = atmel_aes_xts_exit_tfm,
1757
};
1758
1759
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
1760
/* authenc aead functions */
1761
1762
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
1763
static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
1764
bool is_async);
1765
static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
1766
bool is_async);
1767
static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
1768
static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
1769
bool is_async);
1770
1771
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
1772
{
1773
struct aead_request *req = aead_request_cast(dd->areq);
1774
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1775
1776
if (err && (dd->flags & AES_FLAGS_OWN_SHA))
1777
atmel_sha_authenc_abort(&rctx->auth_req);
1778
dd->flags &= ~AES_FLAGS_OWN_SHA;
1779
}
1780
1781
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
1782
{
1783
struct aead_request *req = aead_request_cast(dd->areq);
1784
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1785
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1786
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1787
int err;
1788
1789
atmel_aes_set_mode(dd, &rctx->base);
1790
1791
err = atmel_aes_hw_init(dd);
1792
if (err)
1793
return atmel_aes_complete(dd, err);
1794
1795
return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
1796
atmel_aes_authenc_init, dd);
1797
}
1798
1799
static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
1800
bool is_async)
1801
{
1802
struct aead_request *req = aead_request_cast(dd->areq);
1803
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1804
1805
if (is_async)
1806
dd->is_async = true;
1807
if (err)
1808
return atmel_aes_complete(dd, err);
1809
1810
/* If we reach this point, we own the SHA device. */
1811
dd->flags |= AES_FLAGS_OWN_SHA;
1812
1813
/* Configure the SHA device. */
1814
return atmel_sha_authenc_init(&rctx->auth_req,
1815
req->src, req->assoclen,
1816
rctx->textlen,
1817
atmel_aes_authenc_transfer, dd);
1818
}
1819
1820
static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
1821
bool is_async)
1822
{
1823
struct aead_request *req = aead_request_cast(dd->areq);
1824
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1825
bool enc = atmel_aes_is_encrypt(dd);
1826
struct scatterlist *src, *dst;
1827
__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
1828
u32 emr;
1829
1830
if (is_async)
1831
dd->is_async = true;
1832
if (err)
1833
return atmel_aes_complete(dd, err);
1834
1835
/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
1836
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
1837
dst = src;
1838
1839
if (req->src != req->dst)
1840
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
1841
1842
/* Configure the AES device. */
1843
memcpy(iv, req->iv, sizeof(iv));
1844
1845
/*
1846
* Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
1847
* 'true' even if the data transfer is actually performed by the CPU (so
1848
* not by the DMA) because we must force the AES_MR_SMOD bitfield to the
1849
* value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
1850
* must be set to *_MR_SMOD_IDATAR0.
1851
*/
1852
atmel_aes_write_ctrl(dd, true, iv);
1853
emr = AES_EMR_PLIPEN;
1854
if (!enc)
1855
emr |= AES_EMR_PLIPD;
1856
atmel_aes_write(dd, AES_EMR, emr);
1857
1858
/* Transfer data. */
1859
return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
1860
atmel_aes_authenc_digest);
1861
}
1862
1863
static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
1864
{
1865
struct aead_request *req = aead_request_cast(dd->areq);
1866
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1867
1868
/* atmel_sha_authenc_final() releases the SHA device. */
1869
dd->flags &= ~AES_FLAGS_OWN_SHA;
1870
return atmel_sha_authenc_final(&rctx->auth_req,
1871
rctx->digest, sizeof(rctx->digest),
1872
atmel_aes_authenc_final, dd);
1873
}
1874
1875
static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
1876
bool is_async)
1877
{
1878
struct aead_request *req = aead_request_cast(dd->areq);
1879
struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
1880
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881
bool enc = atmel_aes_is_encrypt(dd);
1882
u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
1883
u32 offs, authsize;
1884
1885
if (is_async)
1886
dd->is_async = true;
1887
if (err)
1888
goto complete;
1889
1890
offs = req->assoclen + rctx->textlen;
1891
authsize = crypto_aead_authsize(tfm);
1892
if (enc) {
1893
scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
1894
} else {
1895
scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
1896
if (crypto_memneq(idigest, odigest, authsize))
1897
err = -EBADMSG;
1898
}
1899
1900
complete:
1901
return atmel_aes_complete(dd, err);
1902
}
1903
1904
static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
1905
unsigned int keylen)
1906
{
1907
struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
1908
struct crypto_authenc_keys keys;
1909
int err;
1910
1911
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1912
goto badkey;
1913
1914
if (keys.enckeylen > sizeof(ctx->base.key))
1915
goto badkey;
1916
1917
/* Save auth key. */
1918
err = atmel_sha_authenc_setkey(ctx->auth,
1919
keys.authkey, keys.authkeylen,
1920
crypto_aead_get_flags(tfm));
1921
if (err) {
1922
memzero_explicit(&keys, sizeof(keys));
1923
return err;
1924
}
1925
1926
/* Save enc key. */
1927
ctx->base.keylen = keys.enckeylen;
1928
memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
1929
1930
memzero_explicit(&keys, sizeof(keys));
1931
return 0;
1932
1933
badkey:
1934
memzero_explicit(&keys, sizeof(keys));
1935
return -EINVAL;
1936
}
1937
1938
static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
                                      unsigned long auth_mode)
{
        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
        struct atmel_aes_dev *dd;

        dd = atmel_aes_dev_alloc(&ctx->base);
        if (!dd)
                return -ENODEV;

        ctx->auth = atmel_sha_authenc_spawn(auth_mode);
        if (IS_ERR(ctx->auth))
                return PTR_ERR(ctx->auth);

        crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
                                      auth_reqsize));
        ctx->base.dd = dd;
        ctx->base.start = atmel_aes_authenc_start;

        return 0;
}

static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
{
        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
}

static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
{
        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
}

static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
{
        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
}

static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
{
        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
}

static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
{
        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
}

static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
{
        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);

        atmel_sha_authenc_free(ctx->auth);
}

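/*
 * Common entry point for the CBC-AES authenc encrypt/decrypt paths:
 * derive the plaintext length from cryptlen (minus the tag on decryption),
 * reject fully empty messages and queue the request on the AES device.
 */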
static int atmel_aes_authenc_crypt(struct aead_request *req,
                                   unsigned long mode)
{
        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
        u32 authsize = crypto_aead_authsize(tfm);
        bool enc = (mode & AES_FLAGS_ENCRYPT);

        /* Compute text length. */
        if (!enc && req->cryptlen < authsize)
                return -EINVAL;
        rctx->textlen = req->cryptlen - (enc ? 0 : authsize);

        /*
         * Empty messages are not supported yet: the SHA auto-padding can
         * only be used on non-empty messages, so a special case would need
         * to be implemented to handle them.
         */
        if (!rctx->textlen && !req->assoclen)
                return -EINVAL;

        rctx->base.mode = mode;
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;

        return atmel_aes_handle_queue(ctx->dd, &req->base);
}

static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
{
        return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
{
        return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
}

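/*
 * AEAD algorithm descriptors for the authenc() templates backed by the AES
 * and SHA hardware. As an illustrative sketch only (not part of this
 * driver), a kernel user would typically reach these transforms through the
 * generic AEAD API:
 *
 *      struct crypto_aead *tfm;
 *
 *      tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_aead_setkey(tfm, key, keylen);  (combined authenc() key blob)
 *      crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *      ...
 *      crypto_free_aead(tfm);
 */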
static struct aead_alg aes_authenc_algs[] = {
{
        .setkey = atmel_aes_authenc_setkey,
        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
        .init = atmel_aes_authenc_hmac_sha1_init_tfm,
        .exit = atmel_aes_authenc_exit_tfm,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,

        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
        },
},
{
        .setkey = atmel_aes_authenc_setkey,
        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
        .init = atmel_aes_authenc_hmac_sha224_init_tfm,
        .exit = atmel_aes_authenc_exit_tfm,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA224_DIGEST_SIZE,

        .base = {
                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                .cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
        },
},
{
        .setkey = atmel_aes_authenc_setkey,
        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
        .init = atmel_aes_authenc_hmac_sha256_init_tfm,
        .exit = atmel_aes_authenc_exit_tfm,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,

        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
        },
},
{
        .setkey = atmel_aes_authenc_setkey,
        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
        .init = atmel_aes_authenc_hmac_sha384_init_tfm,
        .exit = atmel_aes_authenc_exit_tfm,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA384_DIGEST_SIZE,

        .base = {
                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                .cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
        },
},
{
        .setkey = atmel_aes_authenc_setkey,
        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
        .init = atmel_aes_authenc_hmac_sha512_init_tfm,
        .exit = atmel_aes_authenc_exit_tfm,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,

        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
        },
},
};
#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */

/* Probe functions */

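/*
 * Allocate the driver's internal buffer (ATMEL_AES_BUFFER_SIZE bytes) and
 * round the usable length down to a multiple of AES_BLOCK_SIZE.
 */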
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
        dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
        dd->buflen = ATMEL_AES_BUFFER_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                return -ENOMEM;
        }

        return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
        /* Free the whole order-2 allocation made in atmel_aes_buff_init(). */
        free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}

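/*
 * Grab the two DMA channels used by the driver: "tx" feeds the source data
 * path (dd->src) and "rx" the destination path (dd->dst). Both are
 * mandatory; if either request fails, any channel already acquired is
 * released and the error is propagated to the caller.
 */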
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
        int ret;

        /* Try to grab 2 DMA channels */
        dd->src.chan = dma_request_chan(dd->dev, "tx");
        if (IS_ERR(dd->src.chan)) {
                ret = PTR_ERR(dd->src.chan);
                goto err_dma_in;
        }

        dd->dst.chan = dma_request_chan(dd->dev, "rx");
        if (IS_ERR(dd->dst.chan)) {
                ret = PTR_ERR(dd->dst.chan);
                goto err_dma_out;
        }

        return 0;

err_dma_out:
        dma_release_channel(dd->src.chan);
err_dma_in:
        dev_err(dd->dev, "no DMA channel available\n");
        return ret;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
        dma_release_channel(dd->dst.chan);
        dma_release_channel(dd->src.chan);
}

static void atmel_aes_queue_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        dd->is_async = true;
        (void)dd->resume(dd);
}

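/*
 * Interrupt handler: only sources that are both pending (AES_ISR) and
 * enabled (AES_IMR) are handled. They are disabled through AES_IDR and the
 * actual processing is deferred to the done tasklet, provided a request is
 * currently being processed.
 */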
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
        struct atmel_aes_dev *aes_dd = dev_id;
        u32 reg;

        reg = atmel_aes_read(aes_dd, AES_ISR);
        if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
                atmel_aes_write(aes_dd, AES_IDR, reg);
                if (AES_FLAGS_BUSY & aes_dd->flags)
                        tasklet_schedule(&aes_dd->done_task);
                else
                        dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
        int i;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
        if (dd->caps.has_authenc)
                for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
                        crypto_unregister_aead(&aes_authenc_algs[i]);
#endif

        if (dd->caps.has_xts)
                crypto_unregister_skcipher(&aes_xts_alg);

        if (dd->caps.has_gcm)
                crypto_unregister_aead(&aes_gcm_alg);

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_skcipher(&aes_algs[i]);
}

static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
        alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
        alg->cra_alignmask = 0xf;
        alg->cra_priority = ATMEL_AES_PRIORITY;
        alg->cra_module = THIS_MODULE;
}

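/*
 * Register the plain skcipher, GCM, XTS and (optionally) authenc algorithms
 * in that order, gating each group on the detected hardware capabilities.
 * On failure, everything registered so far is unregistered again before the
 * error is returned.
 */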
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                atmel_aes_crypto_alg_init(&aes_algs[i].base);

                err = crypto_register_skcipher(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        if (dd->caps.has_gcm) {
                atmel_aes_crypto_alg_init(&aes_gcm_alg.base);

                err = crypto_register_aead(&aes_gcm_alg);
                if (err)
                        goto err_aes_gcm_alg;
        }

        if (dd->caps.has_xts) {
                atmel_aes_crypto_alg_init(&aes_xts_alg.base);

                err = crypto_register_skcipher(&aes_xts_alg);
                if (err)
                        goto err_aes_xts_alg;
        }

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
        if (dd->caps.has_authenc) {
                for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
                        atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);

                        err = crypto_register_aead(&aes_authenc_algs[i]);
                        if (err)
                                goto err_aes_authenc_alg;
                }
        }
#endif

        return 0;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
        /* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
        for (j = 0; j < i; j++)
                crypto_unregister_aead(&aes_authenc_algs[j]);
        crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
        crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
        i = ARRAY_SIZE(aes_algs);
err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_skcipher(&aes_algs[j]);

        return err;
}

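/*
 * Derive the feature set from the major revision of the AES IP: newer
 * revisions add dual-buffer support, GCM, XTS and the authenc fast path,
 * while unknown revisions keep the minimum capabilities set below.
 */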
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
        dd->caps.has_dualbuff = 0;
        dd->caps.has_gcm = 0;
        dd->caps.has_xts = 0;
        dd->caps.has_authenc = 0;
        dd->caps.max_burst_size = 1;

        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
        case 0x800:
        case 0x700:
        case 0x600:
        case 0x500:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_gcm = 1;
                dd->caps.has_xts = 1;
                dd->caps.has_authenc = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x200:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_gcm = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x130:
                dd->caps.has_dualbuff = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x120:
                break;
        default:
                dev_warn(dd->dev,
                         "Unmanaged aes version, set minimum capabilities\n");
                break;
        }
}

static const struct of_device_id atmel_aes_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-aes" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

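/*
 * Probe sequence: allocate and initialize the device state, map the
 * registers, request the IRQ and the clock, read the hardware version to
 * detect capabilities, set up the internal buffer and DMA channels, then
 * expose the device by adding it to the global list and registering the
 * algorithms. The error labels unwind the non-devm resources in reverse
 * order.
 */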
static int atmel_aes_probe(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;
        struct device *dev = &pdev->dev;
        struct resource *aes_res;
        int err;

        aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
        if (!aes_dd)
                return -ENOMEM;

        aes_dd->dev = dev;

        platform_set_drvdata(pdev, aes_dd);

        INIT_LIST_HEAD(&aes_dd->list);
        spin_lock_init(&aes_dd->lock);

        tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
                     (unsigned long)aes_dd);
        tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
                     (unsigned long)aes_dd);

        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

        aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
        if (IS_ERR(aes_dd->io_base)) {
                err = PTR_ERR(aes_dd->io_base);
                goto err_tasklet_kill;
        }
        aes_dd->phys_base = aes_res->start;

        /* Get the IRQ */
        aes_dd->irq = platform_get_irq(pdev, 0);
        if (aes_dd->irq < 0) {
                err = aes_dd->irq;
                goto err_tasklet_kill;
        }

        err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
                               IRQF_SHARED, "atmel-aes", aes_dd);
        if (err) {
                dev_err(dev, "unable to request aes irq.\n");
                goto err_tasklet_kill;
        }

        /* Initializing the clock */
        aes_dd->iclk = devm_clk_get_prepared(&pdev->dev, "aes_clk");
        if (IS_ERR(aes_dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(aes_dd->iclk);
                goto err_tasklet_kill;
        }

        err = atmel_aes_hw_version_init(aes_dd);
        if (err)
                goto err_tasklet_kill;

        atmel_aes_get_cap(aes_dd);

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
        if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
                err = -EPROBE_DEFER;
                goto err_tasklet_kill;
        }
#endif

        err = atmel_aes_buff_init(aes_dd);
        if (err)
                goto err_tasklet_kill;

        err = atmel_aes_dma_init(aes_dd);
        if (err)
                goto err_buff_cleanup;

        spin_lock(&atmel_aes.lock);
        list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
        spin_unlock(&atmel_aes.lock);

        err = atmel_aes_register_algs(aes_dd);
        if (err)
                goto err_algs;

        dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
                 dma_chan_name(aes_dd->src.chan),
                 dma_chan_name(aes_dd->dst.chan));

        return 0;

err_algs:
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);
        atmel_aes_dma_cleanup(aes_dd);
err_buff_cleanup:
        atmel_aes_buff_cleanup(aes_dd);
err_tasklet_kill:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);

        return err;
}

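/*
 * Device removal mirrors the probe in reverse: unlink the device from the
 * global list, unregister the algorithms, kill the tasklets and release the
 * DMA channels and the internal buffer. The devm-managed resources (IRQ,
 * clock, MMIO mapping) are released by the driver core.
 */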
static void atmel_aes_remove(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;

        aes_dd = platform_get_drvdata(pdev);

        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);

        atmel_aes_unregister_algs(aes_dd);

        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);

        atmel_aes_dma_cleanup(aes_dd);
        atmel_aes_buff_cleanup(aes_dd);
}

static struct platform_driver atmel_aes_driver = {
        .probe = atmel_aes_probe,
        .remove = atmel_aes_remove,
        .driver = {
                .name = "atmel_aes",
                .of_match_table = atmel_aes_dt_ids,
        },
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");