GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/marvell/cesa/cesa.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <[email protected]>
 * Author: Arnaud Ebalard <[email protected]>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;

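/*
 * Dequeue the next pending request from @engine's crypto queue and report
 * through @backlog the request (if any) that just left the backlog. Must be
 * called with the engine lock held, as the _locked suffix indicates.
 */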
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	if (!req)
		return NULL;

	return req;
}

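/*
 * If the engine is idle, pop the next request off the queue and start it:
 * notify the backlogged request (if any) that it is now in progress, then
 * kick the new request's ->step() operation.
 */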
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}

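/*
 * Let the current request process the engine status: on success (0) the
 * request is moved to the completion queue, on -EINPROGRESS its next step
 * is launched, and any other value is returned as an error.
 */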
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}

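/*
 * Use the TDMA path when a hardware descriptor chain is set up, otherwise
 * fall back to standard (register-driven) processing.
 */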
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain_hw.first && engine->chain_hw.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}

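/*
 * Clean up a finished request and signal its completion to the crypto API.
 * Bottom halves are disabled around crypto_request_complete() since this
 * helper runs from the threaded IRQ handler (thread context) rather than
 * the softirq context completion callbacks normally expect.
 */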
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	crypto_request_complete(req, res);
	local_bh_enable();
}

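/*
 * Threaded interrupt handler: acknowledge and process engine interrupts in
 * a loop until no flagged status bits remain, rearming the engine with the
 * next pending request and draining the completion queue on each pass.
 */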
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}

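/*
 * Enqueue @req on its engine's crypto queue. For DMA-capable requests that
 * were accepted or backlogged, chain their TDMA descriptors while the
 * engine lock is still held, then try to rearm the engine. Returns
 * -EINPROGRESS on success, -EBUSY when backlogged, or an error code.
 */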
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}

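/*
 * Register the skcipher and ahash algorithms advertised by the device
 * capabilities, unwinding all previously registered ones on failure.
 */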
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

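/*
 * Illustrative (not board-accurate) device tree node matched by the table
 * below; register offsets, interrupt numbers, and clock phandles are
 * hypothetical, while the property names are the ones this driver parses:
 *
 *	crypto@90000 {
 *		compatible = "marvell,armada-370-crypto";
 *		reg = <0x90000 0x10000>;
 *		reg-names = "regs";
 *		interrupts = <48>;
 *		clocks = <&gateclk 23>;
 *		clock-names = "cesa0";
 *		marvell,crypto-srams = <&crypto_sram>;
 *		marvell,crypto-sram-size = <0x800>;
 *	};
 */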
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

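/*
 * Program the four TDMA address decoding windows so the engine can reach
 * each DRAM chip select described by the mbus configuration.
 */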
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

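/*
 * Allocate the managed DMA pools (TDMA descriptors, operation contexts,
 * hash cache and padding buffers) used by the TDMA engine. A no-op on
 * platforms without TDMA support.
 */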
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}

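/*
 * Acquire the SRAM used by engine @idx: prefer an SRAM region exposed
 * through the "marvell,crypto-srams" genalloc pool, and fall back to
 * ioremapping a memory resource and DMA-mapping it directly.
 */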
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram_pool = gen_pool_dma_alloc(engine->pool,
						       cesa->sram_size,
						       &engine->sram_dma);
		if (engine->sram_pool)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}

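/*
 * Probe a CESA device: resolve the per-SoC capabilities, map registers and
 * SRAM, set up DMA pools, clocks and one threaded IRQ per engine, then
 * register the supported crypto algorithms. Only a single CESA instance is
 * supported system-wide.
 */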
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	int irq, ret, i, cpu;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[16];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		engine->irq = irq;

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%u", i);
		engine->clk = devm_clk_get_optional_enabled(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get_optional_enabled(dev, NULL);
			if (IS_ERR(engine->clk)) {
				ret = PTR_ERR(engine->clk);
				goto err_cleanup;
			}
		}

		snprintf(res_name, sizeof(res_name), "cesaz%u", i);
		engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
		if (IS_ERR(engine->zclk)) {
			ret = PTR_ERR(engine->zclk);
			goto err_cleanup;
		}

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		/* Set affinity */
		cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
		irq_set_affinity_hint(irq, get_cpu_mask(cpu));

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++)
		mv_cesa_put_sram(pdev, i);

	return ret;
}

static void mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++)
		mv_cesa_put_sram(pdev, i);
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.id_table = mv_cesa_plat_id_table,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_AUTHOR("Arnaud Ebalard <[email protected]>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");