Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/aspeed/aspeed-acry.c
26282 views
1
// SPDX-License-Identifier: GPL-2.0+
2
/*
3
* Copyright 2021 Aspeed Technology Inc.
4
*/
5
#include <crypto/engine.h>
6
#include <crypto/internal/akcipher.h>
7
#include <crypto/internal/rsa.h>
8
#include <crypto/scatterwalk.h>
9
#include <linux/clk.h>
10
#include <linux/count_zeros.h>
11
#include <linux/dma-mapping.h>
12
#include <linux/err.h>
13
#include <linux/interrupt.h>
14
#include <linux/kernel.h>
15
#include <linux/mfd/syscon.h>
16
#include <linux/module.h>
17
#include <linux/of.h>
18
#include <linux/platform_device.h>
19
#include <linux/regmap.h>
20
#include <linux/slab.h>
21
#include <linux/string.h>
22
23
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
24
#define ACRY_DBG(d, fmt, ...) \
25
dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
26
#else
27
#define ACRY_DBG(d, fmt, ...) \
28
dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
29
#endif
30
31
/*****************************
32
* *
33
* ACRY register definitions *
34
* *
35
* ***************************/
36
#define ASPEED_ACRY_TRIGGER 0x000 /* ACRY Engine Control: trigger */
37
#define ASPEED_ACRY_DMA_CMD 0x048 /* ACRY Engine Control: Command */
38
#define ASPEED_ACRY_DMA_SRC_BASE 0x04C /* ACRY DRAM base address for DMA */
39
#define ASPEED_ACRY_DMA_LEN 0x050 /* ACRY Data Length of DMA */
40
#define ASPEED_ACRY_RSA_KEY_LEN 0x058 /* ACRY RSA Exp/Mod Key Length (Bits) */
41
#define ASPEED_ACRY_INT_MASK 0x3F8 /* ACRY Interrupt Mask */
42
#define ASPEED_ACRY_STATUS 0x3FC /* ACRY Interrupt Status */
43
44
/* rsa trigger */
45
#define ACRY_CMD_RSA_TRIGGER BIT(0)
46
#define ACRY_CMD_DMA_RSA_TRIGGER BIT(1)
47
48
/* rsa dma cmd */
49
#define ACRY_CMD_DMA_SRAM_MODE_RSA (0x3 << 4)
50
#define ACRY_CMD_DMEM_AHB BIT(8)
51
#define ACRY_CMD_DMA_SRAM_AHB_ENGINE 0
52
53
/* rsa key len */
54
#define RSA_E_BITS_LEN(x) ((x) << 16)
55
#define RSA_M_BITS_LEN(x) (x)
56
57
/* acry isr */
58
#define ACRY_RSA_ISR BIT(1)
59
60
#define ASPEED_ACRY_BUFF_SIZE 0x1800 /* DMA buffer size */
61
#define ASPEED_ACRY_SRAM_MAX_LEN 2048 /* ACRY SRAM maximum length (Bytes) */
62
#define ASPEED_ACRY_RSA_MAX_KEY_LEN 512 /* ACRY RSA maximum key length (Bytes) */
63
64
#define CRYPTO_FLAGS_BUSY BIT(1)
65
#define BYTES_PER_DWORD 4
66
67
/*****************************
68
* *
69
* AHBC register definitions *
70
* *
71
* ***************************/
72
#define AHBC_REGION_PROT 0x240
73
#define REGION_ACRYM BIT(23)
74
75
#define ast_acry_write(acry, val, offset) \
76
writel((val), (acry)->regs + (offset))
77
78
#define ast_acry_read(acry, offset) \
79
readl((acry)->regs + (offset))
80
81
struct aspeed_acry_dev;

/* Continuation invoked from the done tasklet once the engine interrupts. */
typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);

/* Per-device state for one ACRY engine instance. */
struct aspeed_acry_dev {
	void __iomem *regs;		/* engine control registers */
	struct device *dev;
	int irq;
	struct clk *clk;
	struct regmap *ahbc;		/* AHB controller (SRAM protection bit) */

	struct akcipher_request *req;	/* request currently in flight */
	struct tasklet_struct done_task;
	aspeed_acry_fn_t resume;	/* completion continuation (set by trigger) */
	unsigned long flags;		/* CRYPTO_FLAGS_BUSY */

	/* ACRY output SRAM buffer */
	void __iomem *acry_sram;

	/* ACRY input DMA buffer */
	void *buf_addr;
	dma_addr_t buf_dma_addr;

	struct crypto_engine *crypt_engine_rsa;

	/* ACRY SRAM memory mapped */
	int exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
	int data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
};

/* Per-tfm context: parsed key plus private copies of n/e/d for the HW path. */
struct aspeed_acry_ctx {
	struct aspeed_acry_dev *acry_dev;

	struct rsa_key key;	/* parsed key; pointers reference the setkey blob */
	int enc;		/* 1 = encrypt, 0 = decrypt */
	u8 *n;
	u8 *e;
	u8 *d;
	size_t n_sz;
	size_t e_sz;
	size_t d_sz;

	aspeed_acry_fn_t trigger;	/* HW operation to start for this request */

	struct crypto_akcipher *fallback_tfm;	/* SW impl for oversized keys */
};

/* Pairs an engine-registered algorithm with its backing device. */
struct aspeed_acry_alg {
	struct aspeed_acry_dev *acry_dev;
	struct akcipher_engine_alg akcipher;
};

/* Selects which SRAM mapping aspeed_acry_rsa_ctx_copy() writes through. */
enum aspeed_rsa_key_mode {
	ASPEED_RSA_EXP_MODE = 0,
	ASPEED_RSA_MOD_MODE,
	ASPEED_RSA_DATA_MODE,
};
139
140
/*
 * Convert a generic crypto_async_request back to the akcipher request
 * that embeds it (req is the ->base member of struct akcipher_request).
 */
static inline struct akcipher_request *
akcipher_request_cast(struct crypto_async_request *req)
{
	return container_of(req, struct akcipher_request, base);
}
145
146
static int aspeed_acry_do_fallback(struct akcipher_request *req)
147
{
148
struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
149
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
150
int err;
151
152
akcipher_request_set_tfm(req, ctx->fallback_tfm);
153
154
if (ctx->enc)
155
err = crypto_akcipher_encrypt(req);
156
else
157
err = crypto_akcipher_decrypt(req);
158
159
akcipher_request_set_tfm(req, cipher);
160
161
return err;
162
}
163
164
static bool aspeed_acry_need_fallback(struct akcipher_request *req)
165
{
166
struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
167
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
168
169
return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
170
}
171
172
static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
173
struct akcipher_request *req)
174
{
175
if (aspeed_acry_need_fallback(req)) {
176
ACRY_DBG(acry_dev, "SW fallback\n");
177
return aspeed_acry_do_fallback(req);
178
}
179
180
return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
181
}
182
183
static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
184
{
185
struct akcipher_request *req = akcipher_request_cast(areq);
186
struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
187
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
188
struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
189
190
acry_dev->req = req;
191
acry_dev->flags |= CRYPTO_FLAGS_BUSY;
192
193
return ctx->trigger(acry_dev);
194
}
195
196
static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
197
{
198
struct akcipher_request *req = acry_dev->req;
199
200
acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;
201
202
crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);
203
204
return err;
205
}
206
207
/*
 * Copy Data to DMA buffer for engine used.
 *
 * The source bytes are written in reverse order (least significant byte
 * first) through data_byte_mapping, which translates a linear index into
 * the engine's SRAM layout; the remainder of the data region is zeroed.
 *
 * NOTE(review): dram_buffer is static, so this relies on requests being
 * serialized (one request in flight per device) -- confirm.
 */
static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
					      u8 *buf, struct scatterlist *src,
					      size_t nbytes)
{
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int i = 0, j;
	int data_idx;

	ACRY_DBG(acry_dev, "\n");

	/* Linearize the scatterlist into the staging buffer (read, no write-back). */
	scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);

	/* Copy backwards: dram_buffer[nbytes-1] lands in mapped slot 0, ... */
	for (j = nbytes - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = dram_buffer[j];
		i++;
	}

	/* Zero-pad the rest of the mapped data region. */
	for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
		data_idx = acry_dev->data_byte_mapping[i];
		buf[data_idx] = 0;
	}
}
233
234
/*
235
* Copy Exp/Mod to DMA buffer for engine used.
236
*
237
* Params:
238
* - mode 0 : Exponential
239
* - mode 1 : Modulus
240
*
241
* Example:
242
* - DRAM memory layout:
243
* D[0], D[4], D[8], D[12]
244
* - ACRY SRAM memory layout should reverse the order of source data:
245
* D[12], D[8], D[4], D[0]
246
*/
247
static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
				    const void *xbuf, size_t nbytes,
				    enum aspeed_rsa_key_mode mode)
{
	const u8 *src = xbuf;
	__le32 *dw_buf = buf;
	int nbits, ndw;
	int i, j, idx;
	u32 data = 0;

	ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);

	if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
		return -ENOMEM;

	/* Remove the leading zeros */
	while (nbytes > 0 && src[0] == 0) {
		src++;
		nbytes--;
	}

	/* Effective bit length: drop the leading zero bits of the top byte. */
	nbits = nbytes * 8;
	if (nbytes > 0)
		nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);

	/* double-world alignment */
	ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);

	if (nbytes > 0) {
		/* Partial leading dword: start past its unused high bytes. */
		i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
		i %= BYTES_PER_DWORD;

		/*
		 * Pack the big-endian byte stream into dwords; the most
		 * significant dword goes to the highest mapped slot
		 * (reversed order, as the layout comment above requires).
		 */
		for (j = ndw; j > 0; j--) {
			for (; i < BYTES_PER_DWORD; i++) {
				data <<= 8;
				data |= *src++;
			}

			i = 0;

			if (mode == ASPEED_RSA_EXP_MODE)
				idx = acry_dev->exp_dw_mapping[j - 1];
			else /* mode == ASPEED_RSA_MOD_MODE */
				idx = acry_dev->mod_dw_mapping[j - 1];

			dw_buf[idx] = cpu_to_le32(data);
		}
	}

	/* Bit length of the copied value -- what the key-length register wants. */
	return nbits;
}
298
299
/*
 * Completion continuation: read the RSA result out of the engine SRAM.
 *
 * Switches the data memory to CPU access, lifts the SRAM protection,
 * walks the mapped byte slots top-down while dropping leading zeros
 * (so dram_buffer ends up big-endian, most significant byte first),
 * copies the result to the destination scatterlist and finalizes the
 * request.  Always completes with status 0.
 *
 * NOTE(review): dram_buffer is static; relies on one request in flight
 * per device -- confirm.
 */
static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
{
	struct akcipher_request *req = acry_dev->req;
	u8 __iomem *sram_buffer = acry_dev->acry_sram;
	struct scatterlist *out_sg = req->dst;
	static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
	int leading_zero = 1;
	int result_nbytes;
	int i = 0, j;
	int data_idx;

	/* Set Data Memory to AHB(CPU) Access Mode */
	ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);

	/* Disable ACRY SRAM protection */
	regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
			   REGION_ACRYM, 0);

	result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;

	/* Highest mapped slot first; skip leading zero bytes of the result. */
	for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
		data_idx = acry_dev->data_byte_mapping[j];
		if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
			result_nbytes--;
		} else {
			leading_zero = 0;
			dram_buffer[i] = readb(sram_buffer + data_idx);
			i++;
		}
	}

	ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
		 result_nbytes, req->dst_len);

	/* Copy out only when the caller's buffer is large enough. */
	if (result_nbytes <= req->dst_len) {
		scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
					 1);
		req->dst_len = result_nbytes;

	} else {
		dev_err(acry_dev->dev, "RSA engine error!\n");
	}

	/* Scrub the input DMA buffer -- it held key material. */
	memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);

	return aspeed_acry_complete(acry_dev, 0);
}
346
347
static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
348
{
349
struct akcipher_request *req = acry_dev->req;
350
struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
351
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
352
int ne, nm;
353
354
if (!ctx->n || !ctx->n_sz) {
355
dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
356
return -EINVAL;
357
}
358
359
memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
360
361
/* Copy source data to DMA buffer */
362
aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
363
req->src, req->src_len);
364
365
nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
366
ctx->n_sz, ASPEED_RSA_MOD_MODE);
367
if (ctx->enc) {
368
if (!ctx->e || !ctx->e_sz) {
369
dev_err(acry_dev->dev, "%s: key e is not set\n",
370
__func__);
371
return -EINVAL;
372
}
373
/* Copy key e to DMA buffer */
374
ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
375
ctx->e, ctx->e_sz,
376
ASPEED_RSA_EXP_MODE);
377
} else {
378
if (!ctx->d || !ctx->d_sz) {
379
dev_err(acry_dev->dev, "%s: key d is not set\n",
380
__func__);
381
return -EINVAL;
382
}
383
/* Copy key d to DMA buffer */
384
ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
385
ctx->key.d, ctx->key.d_sz,
386
ASPEED_RSA_EXP_MODE);
387
}
388
389
ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
390
ASPEED_ACRY_DMA_SRC_BASE);
391
ast_acry_write(acry_dev, (ne << 16) + nm,
392
ASPEED_ACRY_RSA_KEY_LEN);
393
ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
394
ASPEED_ACRY_DMA_LEN);
395
396
acry_dev->resume = aspeed_acry_rsa_transfer;
397
398
/* Enable ACRY SRAM protection */
399
regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
400
REGION_ACRYM, REGION_ACRYM);
401
402
ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
403
ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
404
ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);
405
406
/* Trigger RSA engines */
407
ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
408
ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);
409
410
return 0;
411
}
412
413
static int aspeed_acry_rsa_enc(struct akcipher_request *req)
414
{
415
struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
416
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
417
struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
418
419
ctx->trigger = aspeed_acry_rsa_trigger;
420
ctx->enc = 1;
421
422
return aspeed_acry_handle_queue(acry_dev, req);
423
}
424
425
static int aspeed_acry_rsa_dec(struct akcipher_request *req)
426
{
427
struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
428
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
429
struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
430
431
ctx->trigger = aspeed_acry_rsa_trigger;
432
ctx->enc = 0;
433
434
return aspeed_acry_handle_queue(acry_dev, req);
435
}
436
437
/*
 * Duplicate a raw key component into kernel memory.  The copy holds key
 * material, so callers release it with kfree_sensitive().
 * Returns NULL on allocation failure.
 */
static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
441
442
static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
443
size_t len)
444
{
445
ctx->n_sz = len;
446
ctx->n = aspeed_rsa_key_copy(value, len);
447
if (!ctx->n)
448
return -ENOMEM;
449
450
return 0;
451
}
452
453
static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
454
size_t len)
455
{
456
ctx->e_sz = len;
457
ctx->e = aspeed_rsa_key_copy(value, len);
458
if (!ctx->e)
459
return -ENOMEM;
460
461
return 0;
462
}
463
464
static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
465
size_t len)
466
{
467
ctx->d_sz = len;
468
ctx->d = aspeed_rsa_key_copy(value, len);
469
if (!ctx->d)
470
return -ENOMEM;
471
472
return 0;
473
}
474
475
static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
476
{
477
kfree_sensitive(ctx->n);
478
kfree_sensitive(ctx->e);
479
kfree_sensitive(ctx->d);
480
ctx->n_sz = 0;
481
ctx->e_sz = 0;
482
ctx->d_sz = 0;
483
}
484
485
static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
486
unsigned int keylen, int priv)
487
{
488
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
489
struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
490
int ret;
491
492
if (priv)
493
ret = rsa_parse_priv_key(&ctx->key, key, keylen);
494
else
495
ret = rsa_parse_pub_key(&ctx->key, key, keylen);
496
497
if (ret) {
498
dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
499
ret);
500
return ret;
501
}
502
503
/* Aspeed engine supports up to 4096 bits,
504
* Use software fallback instead.
505
*/
506
if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
507
return 0;
508
509
ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
510
if (ret)
511
goto err;
512
513
ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
514
if (ret)
515
goto err;
516
517
if (priv) {
518
ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
519
if (ret)
520
goto err;
521
}
522
523
return 0;
524
525
err:
526
dev_err(acry_dev->dev, "rsa set key failed\n");
527
aspeed_rsa_key_free(ctx);
528
529
return ret;
530
}
531
532
static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
533
const void *key,
534
unsigned int keylen)
535
{
536
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
537
int ret;
538
539
ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
540
if (ret)
541
return ret;
542
543
return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
544
}
545
546
static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
547
const void *key,
548
unsigned int keylen)
549
{
550
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
551
int ret;
552
553
ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
554
if (ret)
555
return ret;
556
557
return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
558
}
559
560
static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
561
{
562
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
563
564
if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
565
return crypto_akcipher_maxsize(ctx->fallback_tfm);
566
567
return ctx->n_sz;
568
}
569
570
static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
571
{
572
struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
573
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
574
const char *name = crypto_tfm_alg_name(&tfm->base);
575
struct aspeed_acry_alg *acry_alg;
576
577
acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
578
579
ctx->acry_dev = acry_alg->acry_dev;
580
581
ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
582
CRYPTO_ALG_NEED_FALLBACK);
583
if (IS_ERR(ctx->fallback_tfm)) {
584
dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
585
name, PTR_ERR(ctx->fallback_tfm));
586
return PTR_ERR(ctx->fallback_tfm);
587
}
588
589
return 0;
590
}
591
592
/* akcipher ->exit: release the software fallback transform. */
static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->fallback_tfm);
}
598
599
/* Algorithms exposed by this driver (currently RSA only). */
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
	{
		.akcipher.base = {
			.encrypt = aspeed_acry_rsa_enc,
			.decrypt = aspeed_acry_rsa_dec,
			.set_pub_key = aspeed_acry_rsa_set_pub_key,
			.set_priv_key = aspeed_acry_rsa_set_priv_key,
			.max_size = aspeed_acry_rsa_max_size,
			.init = aspeed_acry_rsa_init_tfm,
			.exit = aspeed_acry_rsa_exit_tfm,
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "aspeed-rsa",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
			},
		},
		.akcipher.op = {
			.do_one_request = aspeed_acry_do_request,
		},
	},
};
626
627
static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
628
{
629
int i, rc;
630
631
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
632
aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
633
rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
634
if (rc) {
635
ACRY_DBG(acry_dev, "Failed to register %s\n",
636
aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
637
}
638
}
639
}
640
641
static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
642
{
643
int i;
644
645
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
646
crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
647
}
648
649
/*
 * ACRY interrupt service routine.
 *
 * Reads the interrupt status and writes it back to the status register
 * (presumably write-1-to-clear -- confirm against the engine docs), then
 * stops the RSA engine and defers result readout to the done tasklet.
 */
static irqreturn_t aspeed_acry_irq(int irq, void *dev)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
	u32 sts;

	sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
	ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);

	ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);

	if (sts & ACRY_RSA_ISR) {
		/* Stop RSA engine */
		ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);

		/* Only resume if a request is actually in flight. */
		if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
			tasklet_schedule(&acry_dev->done_task);
		else
			dev_err(acry_dev->dev, "RSA no active requests.\n");
	}

	return IRQ_HANDLED;
}
672
673
/*
 * ACRY SRAM has its own memory layout.
 * Set the DRAM to SRAM indexing for future used.
 *
 * Builds three lookup tables translating linear indices to SRAM slots:
 * exponent dwords, modulus dwords (4 slots above the exponent), and data
 * bytes (8 dword slots above the exponent, expressed as byte offsets).
 */
static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
{
	int i, j = 0;

	for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
		acry_dev->exp_dw_mapping[i] = j;
		acry_dev->mod_dw_mapping[i] = j + 4;
		acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
		acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
		acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
		acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
		j++;
		/* After every group of 4 slots, skip 8 (interleaved regions). */
		j = j % 4 ? j : j + 8;
	}
}
692
693
/*
 * Bottom half scheduled from the ISR: invoke the continuation armed by
 * the trigger (aspeed_acry_rsa_transfer) to read out the result.
 */
static void aspeed_acry_done_task(unsigned long data)
{
	struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;

	/* Return value intentionally ignored; completion status is fixed. */
	(void)acry_dev->resume(acry_dev);
}
699
700
/* OF match table: the ACRY engine found on AST2600 SoCs. */
static const struct of_device_id aspeed_acry_of_matches[] = {
	{ .compatible = "aspeed,ast2600-acry", },
	{},
};
704
705
static int aspeed_acry_probe(struct platform_device *pdev)
706
{
707
struct aspeed_acry_dev *acry_dev;
708
struct device *dev = &pdev->dev;
709
int rc;
710
711
acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
712
GFP_KERNEL);
713
if (!acry_dev)
714
return -ENOMEM;
715
716
acry_dev->dev = dev;
717
718
platform_set_drvdata(pdev, acry_dev);
719
720
acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
721
if (IS_ERR(acry_dev->regs))
722
return PTR_ERR(acry_dev->regs);
723
724
acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
725
if (IS_ERR(acry_dev->acry_sram))
726
return PTR_ERR(acry_dev->acry_sram);
727
728
/* Get irq number and register it */
729
acry_dev->irq = platform_get_irq(pdev, 0);
730
if (acry_dev->irq < 0)
731
return -ENXIO;
732
733
rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
734
dev_name(dev), acry_dev);
735
if (rc) {
736
dev_err(dev, "Failed to request irq.\n");
737
return rc;
738
}
739
740
acry_dev->clk = devm_clk_get_enabled(dev, NULL);
741
if (IS_ERR(acry_dev->clk)) {
742
dev_err(dev, "Failed to get acry clk\n");
743
return PTR_ERR(acry_dev->clk);
744
}
745
746
acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
747
"aspeed,ahbc");
748
if (IS_ERR(acry_dev->ahbc)) {
749
dev_err(dev, "Failed to get AHBC regmap\n");
750
return -ENODEV;
751
}
752
753
/* Initialize crypto hardware engine structure for RSA */
754
acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
755
if (!acry_dev->crypt_engine_rsa) {
756
rc = -ENOMEM;
757
goto clk_exit;
758
}
759
760
rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
761
if (rc)
762
goto err_engine_rsa_start;
763
764
tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
765
(unsigned long)acry_dev);
766
767
/* Set Data Memory to AHB(CPU) Access Mode */
768
ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
769
770
/* Initialize ACRY SRAM index */
771
aspeed_acry_sram_mapping(acry_dev);
772
773
acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
774
&acry_dev->buf_dma_addr,
775
GFP_KERNEL);
776
if (!acry_dev->buf_addr) {
777
rc = -ENOMEM;
778
goto err_engine_rsa_start;
779
}
780
781
aspeed_acry_register(acry_dev);
782
783
dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");
784
785
return 0;
786
787
err_engine_rsa_start:
788
crypto_engine_exit(acry_dev->crypt_engine_rsa);
789
clk_exit:
790
clk_disable_unprepare(acry_dev->clk);
791
792
return rc;
793
}
794
795
static void aspeed_acry_remove(struct platform_device *pdev)
796
{
797
struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);
798
799
aspeed_acry_unregister(acry_dev);
800
crypto_engine_exit(acry_dev->crypt_engine_rsa);
801
tasklet_kill(&acry_dev->done_task);
802
clk_disable_unprepare(acry_dev->clk);
803
}
804
805
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
806
807
/* Platform driver glue; devices are matched via the OF table above. */
static struct platform_driver aspeed_acry_driver = {
	.probe = aspeed_acry_probe,
	.remove = aspeed_acry_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = aspeed_acry_of_matches,
	},
};
815
816
module_platform_driver(aspeed_acry_driver);
817
818
MODULE_AUTHOR("Neal Liu <[email protected]>");
819
MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
820
MODULE_LICENSE("GPL");
821
822