GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <[email protected]>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

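/*
 * Decide whether a request has to be handled by the software fallback:
 * the DMA path requires 32-bit aligned scatterlist offsets, per-entry
 * lengths that are multiples of the cipher block size, and source and
 * destination entries of equal length. Each fallback reason is counted
 * in the debug statistics.
 */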
static int rk_cipher_need_fallback(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
        struct scatterlist *sgs, *sgd;
        unsigned int stodo, dtodo, len;
        unsigned int bs = crypto_skcipher_blocksize(tfm);

        if (!req->cryptlen)
                return true;

        len = req->cryptlen;
        sgs = req->src;
        sgd = req->dst;
        while (sgs && sgd) {
                if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
                        algt->stat_fb_align++;
                        return true;
                }
                if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
                        algt->stat_fb_align++;
                        return true;
                }
                stodo = min(len, sgs->length);
                if (stodo % bs) {
                        algt->stat_fb_len++;
                        return true;
                }
                dtodo = min(len, sgd->length);
                if (dtodo % bs) {
                        algt->stat_fb_len++;
                        return true;
                }
                if (stodo != dtodo) {
                        algt->stat_fb_sgdiff++;
                        return true;
                }
                len -= stodo;
                sgs = sg_next(sgs);
                sgd = sg_next(sgd);
        }
        return false;
}

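/* Process the request with the fallback skcipher allocated at tfm init time. */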
static int rk_cipher_fallback(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
        int err;

        algt->stat_fb++;

        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        if (rctx->mode & RK_CRYPTO_DEC)
                err = crypto_skcipher_decrypt(&rctx->fallback_req);
        else
                err = crypto_skcipher_encrypt(&rctx->fallback_req);
        return err;
}

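/*
 * Common entry point: either hand the request to the fallback or pick a
 * hardware instance via get_rk_crypto() and queue the request on its
 * crypto_engine.
 */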
static int rk_cipher_handle_req(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
        struct rk_crypto_info *rkc;
        struct crypto_engine *engine;

        if (rk_cipher_need_fallback(req))
                return rk_cipher_fallback(req);

        rkc = get_rk_crypto();

        engine = rkc->engine;
        rctx->dev = rkc;

        return crypto_transfer_skcipher_request_to_engine(engine, req);
}

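/*
 * The setkey handlers validate the key, keep a copy for programming the
 * hardware key registers later and also install the key in the fallback
 * tfm.
 */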
static int rk_aes_setkey(struct crypto_skcipher *cipher,
                         const u8 *key, unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;
        ctx->keylen = keylen;
        memcpy(ctx->key, key, keylen);

        return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_des_setkey(struct crypto_skcipher *cipher,
                         const u8 *key, unsigned int keylen)
{
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des_key(cipher, key);
        if (err)
                return err;

        ctx->keylen = keylen;
        memcpy(ctx->key, key, keylen);

        return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_tdes_setkey(struct crypto_skcipher *cipher,
                          const u8 *key, unsigned int keylen)
{
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des3_key(cipher, key);
        if (err)
                return err;

        ctx->keylen = keylen;
        memcpy(ctx->key, key, keylen);

        return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

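/*
 * Per-algorithm encrypt/decrypt entry points: each one only records the
 * hardware mode bits in the request context and defers to
 * rk_cipher_handle_req().
 */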
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_AES_ECB_MODE;
        return rk_cipher_handle_req(req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
        return rk_cipher_handle_req(req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_AES_CBC_MODE;
        return rk_cipher_handle_req(req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
        return rk_cipher_handle_req(req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = 0;
        return rk_cipher_handle_req(req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_DEC;
        return rk_cipher_handle_req(req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
        return rk_cipher_handle_req(req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
        return rk_cipher_handle_req(req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_TDES_SELECT;
        return rk_cipher_handle_req(req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
        return rk_cipher_handle_req(req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
        return rk_cipher_handle_req(req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

        rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
                     RK_CRYPTO_DEC;
        return rk_cipher_handle_req(req);
}

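/*
 * Program the control, key and configuration registers for one block
 * operation, selecting the TDES or AES unit based on the algorithm
 * block size, and enable the DMA done/error interrupts.
 */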
static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
{
        struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
        u32 block, conf_reg = 0;

        block = crypto_tfm_alg_blocksize(tfm);

        if (block == DES_BLOCK_SIZE) {
                rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
                              RK_CRYPTO_TDES_BYTESWAP_KEY |
                              RK_CRYPTO_TDES_BYTESWAP_IV;
                CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
                memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
                conf_reg = RK_CRYPTO_DESSEL;
        } else {
                rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
                              RK_CRYPTO_AES_KEY_CHANGE |
                              RK_CRYPTO_AES_BYTESWAP_KEY |
                              RK_CRYPTO_AES_BYTESWAP_IV;
                if (ctx->keylen == AES_KEYSIZE_192)
                        rctx->mode |= RK_CRYPTO_AES_192BIT_key;
                else if (ctx->keylen == AES_KEYSIZE_256)
                        rctx->mode |= RK_CRYPTO_AES_256BIT_key;
                CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
                memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
        }
        conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
                    RK_CRYPTO_BYTESWAP_BRFIFO;
        CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
        CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
                     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

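/*
 * Program the DMA source/destination addresses and the transfer length
 * (in 32-bit words; the caller passes todo / 4), then start the block
 * operation.
 */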
static void crypto_dma_start(struct rk_crypto_info *dev,
                             struct scatterlist *sgs,
                             struct scatterlist *sgd, unsigned int todo)
{
        CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
        CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
        CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
                     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

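/*
 * crypto_engine request handler: walk the source and destination
 * scatterlists one entry at a time, DMA-map each pair, program the
 * hardware and wait for the DMA-done interrupt. For CBC the IV is
 * chained across entries and the final IV is copied back to areq->iv.
 */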
static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
{
        struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
        struct scatterlist *sgs, *sgd;
        int err = 0;
        int ivsize = crypto_skcipher_ivsize(tfm);
        int offset;
        u8 iv[AES_BLOCK_SIZE];
        u8 biv[AES_BLOCK_SIZE];
        u8 *ivtouse = areq->iv;
        unsigned int len = areq->cryptlen;
        unsigned int todo;
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
        struct rk_crypto_info *rkc = rctx->dev;

        err = pm_runtime_resume_and_get(rkc->dev);
        if (err)
                return err;

        algt->stat_req++;
        rkc->nreq++;

        if (areq->iv && ivsize > 0) {
                if (rctx->mode & RK_CRYPTO_DEC) {
                        offset = areq->cryptlen - ivsize;
                        scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
                                                 offset, ivsize, 0);
                }
        }

        sgs = areq->src;
        sgd = areq->dst;

        while (sgs && sgd && len) {
                if (!sgs->length) {
                        sgs = sg_next(sgs);
                        sgd = sg_next(sgd);
                        continue;
                }
                if (rctx->mode & RK_CRYPTO_DEC) {
                        /* we backup last block of source to be used as IV at next step */
                        offset = sgs->length - ivsize;
                        scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
                }
                if (sgs == sgd) {
                        err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
                        if (err <= 0) {
                                err = -EINVAL;
                                goto theend_iv;
                        }
                } else {
                        err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
                        if (err <= 0) {
                                err = -EINVAL;
                                goto theend_iv;
                        }
                        err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
                        if (err <= 0) {
                                err = -EINVAL;
                                goto theend_sgs;
                        }
                }
                err = 0;
                rk_cipher_hw_init(rkc, areq);
                if (ivsize) {
                        if (ivsize == DES_BLOCK_SIZE)
                                memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
                        else
                                memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
                }
                reinit_completion(&rkc->complete);
                rkc->status = 0;

                todo = min(sg_dma_len(sgs), len);
                len -= todo;
                crypto_dma_start(rkc, sgs, sgd, todo / 4);
                wait_for_completion_interruptible_timeout(&rkc->complete,
                                                          msecs_to_jiffies(2000));
                if (!rkc->status) {
                        dev_err(rkc->dev, "DMA timeout\n");
                        err = -EFAULT;
                        goto theend;
                }
                if (sgs == sgd) {
                        dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
                } else {
                        dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
                        dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
                }
                if (rctx->mode & RK_CRYPTO_DEC) {
                        memcpy(iv, biv, ivsize);
                        ivtouse = iv;
                } else {
                        offset = sgd->length - ivsize;
                        scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
                        ivtouse = iv;
                }
                sgs = sg_next(sgs);
                sgd = sg_next(sgd);
        }

        if (areq->iv && ivsize > 0) {
                offset = areq->cryptlen - ivsize;
                if (rctx->mode & RK_CRYPTO_DEC) {
                        memcpy(areq->iv, rctx->backup_iv, ivsize);
                        memzero_explicit(rctx->backup_iv, ivsize);
                } else {
                        scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
                                                 ivsize, 0);
                }
        }

theend:
        pm_runtime_put_autosuspend(rkc->dev);

        local_bh_disable();
        crypto_finalize_skcipher_request(engine, areq, err);
        local_bh_enable();
        return 0;

theend_sgs:
        if (sgs == sgd) {
                dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
                dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
        }
theend_iv:
        return err;
}

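/*
 * Allocate the software fallback tfm and reserve room for its request
 * in our request context.
 */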
static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
{
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);

        ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback_tfm)) {
                dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(ctx->fallback_tfm));
                return PTR_ERR(ctx->fallback_tfm);
        }

        crypto_skcipher_set_reqsize(tfm, sizeof(struct rk_cipher_rctx) +
                                    crypto_skcipher_reqsize(ctx->fallback_tfm));

        return 0;
}

static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
{
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        memzero_explicit(ctx->key, ctx->keylen);
        crypto_free_skcipher(ctx->fallback_tfm);
}

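/*
 * Algorithm templates registered with the crypto API; all of them use
 * rk_cipher_run() as the crypto_engine request handler.
 */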
struct rk_crypto_tmp rk_ecb_aes_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .alg.skcipher.base = {
                .base.cra_name = "ecb(aes)",
                .base.cra_driver_name = "ecb-aes-rk",
                .base.cra_priority = 300,
                .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = AES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask = 0x0f,
                .base.cra_module = THIS_MODULE,

                .init = rk_cipher_tfm_init,
                .exit = rk_cipher_tfm_exit,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .setkey = rk_aes_setkey,
                .encrypt = rk_aes_ecb_encrypt,
                .decrypt = rk_aes_ecb_decrypt,
        },
        .alg.skcipher.op = {
                .do_one_request = rk_cipher_run,
        },
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .alg.skcipher.base = {
                .base.cra_name = "cbc(aes)",
                .base.cra_driver_name = "cbc-aes-rk",
                .base.cra_priority = 300,
                .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = AES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask = 0x0f,
                .base.cra_module = THIS_MODULE,

                .init = rk_cipher_tfm_init,
                .exit = rk_cipher_tfm_exit,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .setkey = rk_aes_setkey,
                .encrypt = rk_aes_cbc_encrypt,
                .decrypt = rk_aes_cbc_decrypt,
        },
        .alg.skcipher.op = {
                .do_one_request = rk_cipher_run,
        },
};

struct rk_crypto_tmp rk_ecb_des_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .alg.skcipher.base = {
                .base.cra_name = "ecb(des)",
                .base.cra_driver_name = "ecb-des-rk",
                .base.cra_priority = 300,
                .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = DES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask = 0x07,
                .base.cra_module = THIS_MODULE,

                .init = rk_cipher_tfm_init,
                .exit = rk_cipher_tfm_exit,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
                .setkey = rk_des_setkey,
                .encrypt = rk_des_ecb_encrypt,
                .decrypt = rk_des_ecb_decrypt,
        },
        .alg.skcipher.op = {
                .do_one_request = rk_cipher_run,
        },
};

struct rk_crypto_tmp rk_cbc_des_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .alg.skcipher.base = {
                .base.cra_name = "cbc(des)",
                .base.cra_driver_name = "cbc-des-rk",
                .base.cra_priority = 300,
                .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = DES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask = 0x07,
                .base.cra_module = THIS_MODULE,

                .init = rk_cipher_tfm_init,
                .exit = rk_cipher_tfm_exit,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
                .ivsize = DES_BLOCK_SIZE,
                .setkey = rk_des_setkey,
                .encrypt = rk_des_cbc_encrypt,
                .decrypt = rk_des_cbc_decrypt,
        },
        .alg.skcipher.op = {
                .do_one_request = rk_cipher_run,
        },
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .alg.skcipher.base = {
                .base.cra_name = "ecb(des3_ede)",
                .base.cra_driver_name = "ecb-des3-ede-rk",
                .base.cra_priority = 300,
                .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = DES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask = 0x07,
                .base.cra_module = THIS_MODULE,

                .init = rk_cipher_tfm_init,
                .exit = rk_cipher_tfm_exit,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
                .setkey = rk_tdes_setkey,
                .encrypt = rk_des3_ede_ecb_encrypt,
                .decrypt = rk_des3_ede_ecb_decrypt,
        },
        .alg.skcipher.op = {
                .do_one_request = rk_cipher_run,
        },
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .alg.skcipher.base = {
                .base.cra_name = "cbc(des3_ede)",
                .base.cra_driver_name = "cbc-des3-ede-rk",
                .base.cra_priority = 300,
                .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize = DES_BLOCK_SIZE,
                .base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask = 0x07,
                .base.cra_module = THIS_MODULE,

                .init = rk_cipher_tfm_init,
                .exit = rk_cipher_tfm_exit,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
                .ivsize = DES_BLOCK_SIZE,
                .setkey = rk_tdes_setkey,
                .encrypt = rk_des3_ede_cbc_encrypt,
                .decrypt = rk_des3_ede_cbc_decrypt,
        },
        .alg.skcipher.op = {
                .do_one_request = rk_cipher_run,
        },
};