GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/hisilicon/sec2/sec_crypto.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY 4001
#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit-field offsets and masks */
#define SEC_DE_OFFSET 1
#define SEC_CIPHER_OFFSET 4
#define SEC_SCENE_OFFSET 3
#define SEC_DST_SGL_OFFSET 2
#define SEC_SRC_SGL_OFFSET 7
#define SEC_CKEY_OFFSET 9
#define SEC_CMODE_OFFSET 12
#define SEC_AKEY_OFFSET 5
#define SEC_AEAD_ALG_OFFSET 11
#define SEC_AUTH_OFFSET 6

#define SEC_DE_OFFSET_V3 9
#define SEC_SCENE_OFFSET_V3 5
#define SEC_CKEY_OFFSET_V3 13
#define SEC_CTR_CNT_OFFSET 25
#define SEC_CTR_CNT_ROLLOVER 2
#define SEC_SRC_SGL_OFFSET_V3 11
#define SEC_DST_SGL_OFFSET_V3 14
#define SEC_CALG_OFFSET_V3 4
#define SEC_AKEY_OFFSET_V3 9
#define SEC_MAC_OFFSET_V3 4
#define SEC_AUTH_ALG_OFFSET_V3 15
#define SEC_CIPHER_AUTH_V3 0xbf
#define SEC_AUTH_CIPHER_V3 0x40
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E

#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))

#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \
			SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
			SEC_PBUF_LEFT_SZ(depth))
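/*
 * Each per-request pbuf package is laid out as
 * <SEC_PBUF_SZ data | SEC_IV_SIZE iv | 2 * SEC_MAX_MAC_LEN mac>.
 * A page holds SEC_PBUF_NUM packages, so a queue of 'depth' requests
 * needs SEC_PBUF_PAGE_NUM(depth) full pages plus SEC_PBUF_LEFT_SZ(depth)
 * bytes for the remainder.
 */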

#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
#define SEC_ICV_ERR 0x2
#define MAC_LEN_MASK 0x1U
#define MAX_INPUT_DATA_LEN 0xFFFE00
#define BITS_MASK 0xFF
#define WORD_MASK 0x3
#define BYTE_BITS 0x8
#define BYTES_TO_WORDS(bcount) ((bcount) >> 2)
#define SEC_XTS_NAME_SZ 0x3
#define IV_CM_CAL_NUM 2
#define IV_CL_MASK 0x7
#define IV_CL_MIN 2
#define IV_CL_MID 4
#define IV_CL_MAX 8
#define IV_FLAGS_OFFSET 0x6
#define IV_CM_OFFSET 0x3
#define IV_LAST_BYTE1 1
#define IV_LAST_BYTE2 2
#define IV_LAST_BYTE_MASK 0xFF
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
#define SEC_GCM_MIN_AUTH_SZ 0x8
#define SEC_RETRY_MAX_CNT 5U

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;

struct sec_skcipher {
	u64 alg_msk;
	struct skcipher_alg alg;
};

struct sec_aead {
	u64 alg_msk;
	struct aead_alg alg;
};

static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt);
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt);

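/*
 * Allocate a free request id from the queue's IDR, cycling through the
 * ring so that recently freed slots are not immediately reused.
 */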
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->id_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->id_lock);
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	spin_lock_bh(&qp_ctx->id_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->id_lock);
}

static void pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;
}

static void pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;
}

static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

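/*
 * Try to push one request into the hardware queue. Type2 BDs are tagged
 * with the ring slot so the completion callback can find the request in
 * req_list. Returns -EINPROGRESS on success and -EBUSY when the queue
 * is full.
 */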
static int qp_send_message(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}

	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
		req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
		qp_ctx->req_list[qp_ctx->send_head] = req;
	}

	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ret) {
		spin_unlock_bh(&qp_ctx->req_lock);
		return ret;
	}
	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
		qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;

	spin_unlock_bh(&qp_ctx->req_lock);

	atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
	return -EINPROGRESS;
}

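/*
 * Drain the backlog when the hardware path cannot take the requests:
 * unmap each queued request and complete it through the software
 * fallback implementation.
 */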
static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *req, *tmp;
	int ret;

	list_for_each_entry_safe(req, tmp, &qp_ctx->qp->backlog.list, list) {
		list_del(&req->list);
		ctx->req_op->buf_unmap(ctx, req);
		if (req->req_id >= 0)
			sec_free_req_id(req);

		if (ctx->alg_type == SEC_AEAD)
			ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
						   req->c_req.encrypt);
		else
			ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
						       req->c_req.encrypt);

		/* Wake up the busy thread first, then return the errno. */
		crypto_request_complete(req->base, -EINPROGRESS);
		crypto_request_complete(req->base, ret);
	}
}

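/*
 * Resubmit backlogged requests to the hardware queue. Stop when the
 * device is still busy; on any other error fall back to software for
 * the remaining requests.
 */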
static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct hisi_qp *qp = qp_ctx->qp;
	struct sec_req *req, *tmp;
	int ret;

	spin_lock_bh(&qp->backlog.lock);
	list_for_each_entry_safe(req, tmp, &qp->backlog.list, list) {
		ret = qp_send_message(req);
		switch (ret) {
		case -EINPROGRESS:
			list_del(&req->list);
			crypto_request_complete(req->base, -EINPROGRESS);
			break;
		case -EBUSY:
			/* The device is still busy; stop sending requests. */
			goto unlock;
		default:
			/* Release memory resources and send all requests through software. */
			sec_alg_send_backlog_soft(ctx, qp_ctx);
			goto unlock;
		}
	}

unlock:
	spin_unlock_bh(&qp->backlog.lock);
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_dfx *dfx;
	struct sec_req *req;
	int err;

	pre_parse_finished_bd(&status, resp);

	req = qp_ctx->req_list[status.tag];
	req->err_type = status.err_type;
	ctx = req->ctx;
	dfx = &ctx->sec->debug.dfx;

	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);
	ctx->req_op->callback(ctx, req, err);
}

static void sec_req_cb3(struct hisi_qp *qp, void *resp)
{
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_dfx *dfx;
	struct sec_req *req;
	int err;

	pre_parse_finished_bd3(&status, resp);

	req = (void *)(uintptr_t)status.tag;
	req->err_type = status.err_type;
	ctx = req->ctx;
	dfx = &ctx->sec->debug.dfx;

	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);
	ctx->req_op->callback(ctx, req, err);
}

static int sec_alg_send_message_retry(struct sec_req *req)
{
	int ctr = 0;
	int ret;

	do {
		ret = qp_send_message(req);
	} while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);

	return ret;
}

static int sec_alg_try_enqueue(struct sec_req *req)
{
	struct hisi_qp *qp = req->qp_ctx->qp;

	/* Check if any request is already backlogged */
	if (!list_empty(&qp->backlog.list))
		return -EBUSY;

	/* Try to enqueue to HW ring */
	return qp_send_message(req);
}

static int sec_alg_send_message_maybacklog(struct sec_req *req)
{
	struct hisi_qp *qp = req->qp_ctx->qp;
	int ret;

	ret = sec_alg_try_enqueue(req);
	if (ret != -EBUSY)
		return ret;

	spin_lock_bh(&qp->backlog.lock);
	ret = sec_alg_try_enqueue(req);
	if (ret == -EBUSY)
		list_add_tail(&req->list, &qp->backlog.list);
	spin_unlock_bh(&qp->backlog.lock);

	return ret;
}

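/*
 * Choose the submission policy: requests that may backlog are queued on
 * the per-queue backlog list when the hardware ring is full, all others
 * are simply retried a bounded number of times.
 */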
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
		return sec_alg_send_message_maybacklog(req);

	return sec_alg_send_message_retry(req);
}

static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-mapped pbuf is used for small packets
 * (< 512 bytes) so that no per-request IOMMU translation is needed.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int size = SEC_PBUF_PAGE_NUM(q_depth);
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the out_mac:
	 * <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM packages and the queue needs
	 * q_depth of them, so SEC_PBUF_PAGE_NUM pages plus the leftover
	 * bytes make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= size; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == q_depth)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD) {
		sec_free_mac_resource(dev, qp_ctx->res);
		sec_free_aiv_resource(dev, qp_ctx->res);
	}
}

static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	u16 q_depth = qp_ctx->qp->sq_depth;
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
	if (!qp_ctx->req_list)
		return ret;

	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
	if (!qp_ctx->res)
		goto err_free_req_list;
	qp_ctx->res->depth = q_depth;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_res;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	return 0;

err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_res:
	kfree(qp_ctx->res);
err_free_req_list:
	kfree(qp_ctx->req_list);
	return ret;
}

static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_alg_resource_free(ctx, qp_ctx);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->res);
	kfree(qp_ctx->req_list);
}

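/*
 * Set up one queue pair context: bind it to its hardware queue, register
 * the completion callback matching the supported BD type and allocate
 * the per-queue request resources.
 */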
static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
{
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	if (ctx->type_supported == SEC_BD_TYPE3)
		qp->req_cb = sec_req_cb3;
	else
		qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	spin_lock_init(&qp_ctx->id_lock);
	qp_ctx->send_head = 0;

	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
	if (ret)
		goto err_destroy_idr;

	return 0;

err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps)
		return -ENODEV;

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(ctx, i);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	if (!ctx->qps)
		return;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	if (!ctx->qps)
		return 0;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	if (!ctx->qps)
		return;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (!ctx->qps)
		return;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc fallback tfm for %s!\n", alg);
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret && ret != -ENODEV)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		} else {
			switch (keylen) {
			case AES_KEYSIZE_128:
				c_ctx->c_key_len = SEC_CKEY_128BIT;
				break;
			case AES_KEYSIZE_192:
				c_ctx->c_key_len = SEC_CKEY_192BIT;
				break;
			case AES_KEYSIZE_256:
				c_ctx->c_key_len = SEC_CKEY_256BIT;
				break;
			default:
				pr_err("hisi_sec2: aes key error!\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

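/*
 * Common skcipher setkey helper: validate the key for the requested
 * algorithm/mode, program it into the hardware key buffer and mirror it
 * into the software fallback tfm.
 */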
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (!ctx->qps)
		goto set_soft_key;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		dev_err(dev, "sec c_alg err!\n");
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

set_soft_key:
	ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
	if (ret) {
		dev_err(dev, "failed to set fallback skcipher key!\n");
		return ret;
	}

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)				\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,	\
			     u32 keylen)					\
{										\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);		\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)

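/*
 * Copy the source scatterlist into the request's pbuf. For AEAD
 * decryption the received MAC at the tail of the copied data is saved so
 * it can be checked later. Requests without a ring id map their private
 * pbuf instead of using the pre-mapped per-slot one.
 */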
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_request_buf *buf = &req->buf;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;
	u8 *mac_offset, *pbuf;
	size_t authsize;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
	pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}
	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = pbuf + copy_size - authsize;
		memcpy(req->aead_req.out_mac, mac_offset, authsize);
	}

	if (req->req_id < 0) {
		buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, buf->in_dma)))
			return -ENOMEM;

		buf->out_dma = buf->in_dma;
		return 0;
	}

	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = req->in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_request_buf *buf = &req->buf;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	if (req->req_id < 0)
		pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);
	else
		pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,
						  copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");

	if (req->req_id < 0)
		dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
}

static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct scatterlist *sgl = aead_req->src;
	u8 *mac_out = req->out_mac;
	size_t copy_size;
	off_t skip_size;

	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}

static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)
{
	hw_sge->buf = sg_dma_address(sgl);
	hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
	hw_sge->page_ctrl = sg_virt(sgl);
}

static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
				struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
				int dma_dir)
{
	struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
	u32 i, sg_n, sg_n_mapped;
	struct scatterlist *sg;
	u32 sge_var = 0;

	sg_n = sg_nents(src);
	sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
	if (unlikely(!sg_n_mapped)) {
		dev_err(dev, "dma mapping for SG error!\n");
		return -EINVAL;
	} else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
		dev_err(dev, "the number of entries in input scatterlist error!\n");
		dma_unmap_sg(dev, src, sg_n, dma_dir);
		return -EINVAL;
	}

	for_each_sg(src, sg, sg_n_mapped, i) {
		fill_sg_to_hw_sge(sg, curr_hw_sge);
		curr_hw_sge++;
		sge_var++;
	}

	src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
	src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
	src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
	*hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
	if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
		dma_unmap_sg(dev, src, sg_n, dma_dir);
		return -ENOMEM;
	}

	return 0;
}

static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
				  dma_addr_t src_in, int dma_dir)
{
	dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
	dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
}

static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
			      struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
	struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
	int ret;

	if (dst == src) {
		ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
					   DMA_BIDIRECTIONAL);
		req->buf.out_dma = req->buf.in_dma;
		return ret;
	}

	ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
	if (unlikely(ret))
		return ret;

	ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
				   DMA_FROM_DEVICE);
	if (unlikely(ret)) {
		sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
		return ret;
	}

	return 0;
}

static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
				struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	enum dma_data_direction src_direction;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		return sec_cipher_pbuf_map(ctx, req, src);
	}

	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						qp_ctx->c_in_pool,
						req->req_id,
						&req->in_dma, src_direction);
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
			return ret;
		}
	}

	if (dst == src) {
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma,
							     DMA_FROM_DEVICE);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

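/*
 * Map the request buffers for hardware access. Requests that got a ring
 * id use the pre-allocated per-queue resources; requests without one map
 * their private IV/MAC buffers and build the hardware SGLs on the fly.
 */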
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	bool is_aead = (ctx->alg_type == SEC_AEAD);
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	if (req->req_id >= 0)
		return sec_cipher_map_inner(ctx, req, src, dst);

	c_req->c_ivin = c_req->c_ivin_buf;
	c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
					   SEC_IV_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
		return -ENOMEM;

	if (is_aead) {
		a_req->a_ivin = a_req->a_ivin_buf;
		a_req->out_mac = a_req->out_mac_buf;
		a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
						   SEC_IV_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
			goto free_c_ivin_dma;

		a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
						    SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
			goto free_a_ivin_dma;
	}
	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		if (unlikely(ret))
			goto free_out_mac_dma;

		return 0;
	}

	if (!c_req->encrypt && is_aead) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			goto free_out_mac_dma;
		}
	}

	ret = sec_cipher_map_sgl(dev, req, src, dst);
	if (unlikely(ret)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		goto free_out_mac_dma;
	}

	return 0;

free_out_mac_dma:
	if (is_aead)
		dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
free_a_ivin_dma:
	if (is_aead)
		dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
free_c_ivin_dma:
	dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
	return ret;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->req_id >= 0) {
		if (req->use_pbuf) {
			sec_cipher_pbuf_unmap(ctx, req, dst);
		} else {
			if (dst != src) {
				hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
				hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
			} else {
				hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
			}
		}
		return;
	}

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src) {
			sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
			sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
		} else {
			sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
		}
	}

	dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
	if (ctx->alg_type == SEC_AEAD) {
		dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
		dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		if (keys->authkeylen)
			memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	if (!ctx->qps)
		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
	}

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret) {
		dev_err(dev, "sec extract aead keys err!\n");
		goto bad_key;
	}

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
	if (ret) {
		dev_err(dev, "set sec fallback key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return ret;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)			  \
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
{										  \
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);		  \
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

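/*
 * Fill a type2 (v2 hardware) BD for a plain cipher request: key and IV
 * addresses, data addresses, algorithm/mode/key-length fields and the
 * SGL/pbuf address types.
 */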
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	if (req->req_id < 0) {
		sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
		sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
	} else {
		sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
		sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
	}
	if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}
	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);

	return 0;
}

static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	if (req->req_id < 0) {
		sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
		sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
	} else {
		sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
		sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
	}
	if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	/* Set the CTR counter mode to 128-bit rollover */
	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
						SEC_CTR_CNT_OFFSET);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64((unsigned long)req);

	return 0;
}

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}

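/*
 * Refresh the request IV: CBC mode takes the last ciphertext block as
 * the next IV, while CTR mode advances the counter by the number of
 * processed blocks.
 */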
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1644
{
1645
struct aead_request *aead_req = req->aead_req.aead_req;
1646
struct skcipher_request *sk_req = req->c_req.sk_req;
1647
u32 iv_size = req->ctx->c_ctx.ivsize;
1648
struct scatterlist *sgl;
1649
unsigned int cryptlen;
1650
size_t sz;
1651
u8 *iv;
1652
1653
if (alg_type == SEC_SKCIPHER) {
1654
sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
1655
iv = sk_req->iv;
1656
cryptlen = sk_req->cryptlen;
1657
} else {
1658
sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
1659
iv = aead_req->iv;
1660
cryptlen = aead_req->cryptlen;
1661
}
1662
1663
if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
1664
sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
1665
cryptlen - iv_size);
1666
if (unlikely(sz != iv_size))
1667
dev_err(req->ctx->dev, "copy output iv error!\n");
1668
} else {
1669
sz = (cryptlen + iv_size - 1) / iv_size;
1670
ctr_iv_inc(iv, iv_size, sz);
1671
}
1672
}
1673
1674
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1675
int err)
1676
{
1677
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1678
1679
if (req->req_id >= 0)
1680
sec_free_req_id(req);
1681
1682
/* IV output at encrypto of CBC/CTR mode */
1683
if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1684
ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1685
sec_update_iv(req, SEC_SKCIPHER);
1686
1687
crypto_request_complete(req->base, err);
1688
sec_alg_send_backlog(ctx, qp_ctx);
1689
}
1690
1691
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1692
{
1693
struct aead_request *aead_req = req->aead_req.aead_req;
1694
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
1695
size_t authsize = crypto_aead_authsize(tfm);
1696
struct sec_aead_req *a_req = &req->aead_req;
1697
struct sec_cipher_req *c_req = &req->c_req;
1698
u32 data_size = aead_req->cryptlen;
1699
u8 flage = 0;
1700
u8 cm, cl;
1701
1702
/* the specification has been checked in aead_iv_demension_check() */
1703
cl = c_req->c_ivin[0] + 1;
1704
c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
1705
memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
1706
c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
1707
1708
/* the last 3bit is L' */
1709
flage |= c_req->c_ivin[0] & IV_CL_MASK;
1710
1711
/* the M' is bit3~bit5, the Flags is bit6 */
1712
cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
1713
flage |= cm << IV_CM_OFFSET;
1714
if (aead_req->assoclen)
1715
flage |= 0x01 << IV_FLAGS_OFFSET;
1716
1717
memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
1718
a_req->a_ivin[0] = flage;
1719
1720
/*
1721
* the last 32bit is counter's initial number,
1722
* but the nonce uses the first 16bit
1723
* the tail 16bit fill with the cipher length
1724
*/
1725
if (!c_req->encrypt)
1726
data_size = aead_req->cryptlen - authsize;
1727
1728
a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
1729
data_size & IV_LAST_BYTE_MASK;
1730
data_size >>= IV_BYTE_OFFSET;
1731
a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
1732
data_size & IV_LAST_BYTE_MASK;
1733
}
1734
1735
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1736
{
1737
struct aead_request *aead_req = req->aead_req.aead_req;
1738
struct sec_aead_req *a_req = &req->aead_req;
1739
struct sec_cipher_req *c_req = &req->c_req;
1740
1741
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1742
1743
if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
1744
/*
1745
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
1746
* the counter must set to 0x01
1747
* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
1748
*/
1749
set_aead_auth_iv(ctx, req);
1750
} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
1751
/* GCM 12Byte Cipher_IV == Auth_IV */
1752
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
1753
}
1754
}
1755
1756
static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
1757
struct sec_req *req, struct sec_sqe *sec_sqe)
1758
{
1759
struct sec_aead_req *a_req = &req->aead_req;
1760
struct aead_request *aq = a_req->aead_req;
1761
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1762
size_t authsize = crypto_aead_authsize(tfm);
1763
1764
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1765
sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
1766
1767
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
1768
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
1769
sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1770
sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
1771
1772
if (dir)
1773
sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1774
else
1775
sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1776
1777
sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
1778
sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
1779
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1780
1781
sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1782
}
1783
1784
static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
1785
struct sec_req *req, struct sec_sqe3 *sqe3)
1786
{
1787
struct sec_aead_req *a_req = &req->aead_req;
1788
struct aead_request *aq = a_req->aead_req;
1789
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1790
size_t authsize = crypto_aead_authsize(tfm);
1791
1792
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1793
sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
1794
1795
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
1796
sqe3->a_key_addr = sqe3->c_key_addr;
1797
sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1798
sqe3->auth_mac_key |= SEC_NO_AUTH;
1799
1800
if (dir)
1801
sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1802
else
1803
sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1804
1805
sqe3->a_len_key = cpu_to_le32(aq->assoclen);
1806
sqe3->auth_src_offset = cpu_to_le16(0x0);
1807
sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1808
sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1809
}
1810
1811
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1812
struct sec_req *req, struct sec_sqe *sec_sqe)
1813
{
1814
struct sec_aead_req *a_req = &req->aead_req;
1815
struct sec_cipher_req *c_req = &req->c_req;
1816
struct aead_request *aq = a_req->aead_req;
1817
struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1818
size_t authsize = crypto_aead_authsize(tfm);
1819
1820
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1821
1822
sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
1823
1824
sec_sqe->type2.mac_key_alg |=
1825
cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
1826
1827
sec_sqe->type2.mac_key_alg |=
1828
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
1829
1830
if (dir) {
1831
sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
1832
sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1833
} else {
1834
sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
1835
sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1836
}
1837
sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
1838
1839
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1840
1841
sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1842
}
1843
1844
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1845
{
1846
struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1847
struct sec_sqe *sec_sqe = &req->sec_sqe;
1848
int ret;
1849
1850
ret = sec_skcipher_bd_fill(ctx, req);
1851
if (unlikely(ret)) {
1852
dev_err(ctx->dev, "skcipher bd fill is error!\n");
1853
return ret;
1854
}
1855
1856
if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1857
ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1858
sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1859
else
1860
sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1861
1862
return 0;
1863
}
1864
1865
static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
		cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
		cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
		cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}
	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	size_t sz;

	if (!err && req->c_req.encrypt) {
		if (c->c_ctx.c_mode == SEC_CMODE_CBC)
			sec_update_iv(req, SEC_AEAD);

		sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
					  authsize, a_req->cryptlen + a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	if (req->req_id >= 0)
		sec_free_req_id(req);

	crypto_request_complete(req->base, err);
	sec_alg_send_backlog(c, qp_ctx);
}

static void sec_request_uninit(struct sec_req *req)
{
	if (req->req_id >= 0)
		sec_free_req_id(req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int i = 0;

	do {
		qp_ctx = &ctx->qp_ctx[i];
		req->req_id = sec_alloc_req_id(req, qp_ctx);
	} while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);

	req->qp_ctx = qp_ctx;

	return 0;
}

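/*
 * Note on sec_request_init(): the request id is taken from the first qp_ctx
 * that still has a free slot; the loop walks the ctx_q_num queue contexts in
 * order and stops at the first successful sec_alloc_req_id(). A negative
 * req_id is not treated as a failure here; the completion and uninit paths
 * check req_id >= 0 before freeing it.
 */
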
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* For CBC/CTR decryption, save the output IV before the data is processed */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On sending failure, restore the IV from the user request */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);

err_uninit_req:
	sec_request_uninit(req);
	if (ctx->alg_type == SEC_AEAD)
		ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
					   req->c_req.encrypt);
	else
		ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
					       req->c_req.encrypt);
	return ret;
}

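/*
 * Rough request flow, as a sketch of the function above (not an additional
 * code path):
 *
 *	sec_process()
 *	  sec_request_init()      pick a qp_ctx and request id
 *	  sec_request_transfer()  prepare buffers/IV and fill the BD
 *	  req_op->bd_send()       queue the BD; -EBUSY/-EINPROGRESS means the
 *	                          request was accepted
 *	  on any failure          undo the steps taken so far and retry via the
 *	                          software fallback (soft_crypto)
 */
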
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map = sec_skcipher_sgl_map,
	.buf_unmap = sec_skcipher_sgl_unmap,
	.do_transfer = sec_skcipher_copy_iv,
	.bd_fill = sec_skcipher_bd_fill,
	.bd_send = sec_bd_send,
	.callback = sec_skcipher_callback,
	.process = sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map = sec_aead_sgl_map,
	.buf_unmap = sec_aead_sgl_unmap,
	.do_transfer = sec_aead_set_iv,
	.bd_fill = sec_aead_bd_fill,
	.bd_send = sec_bd_send,
	.callback = sec_aead_callback,
	.process = sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map = sec_skcipher_sgl_map,
	.buf_unmap = sec_skcipher_sgl_unmap,
	.do_transfer = sec_skcipher_copy_iv,
	.bd_fill = sec_skcipher_bd_fill_v3,
	.bd_send = sec_bd_send,
	.callback = sec_skcipher_callback,
	.process = sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map = sec_aead_sgl_map,
	.buf_unmap = sec_aead_sgl_unmap,
	.do_transfer = sec_aead_set_iv,
	.bd_fill = sec_aead_bd_fill_v3,
	.bd_send = sec_bd_send,
	.callback = sec_aead_callback,
	.process = sec_process,
};

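/*
 * The four op tables above are what ctx->req_op points at: the ctx init
 * routines below select the *_v3 table when the queue hardware reports
 * QM_HW_V3 or newer, and the type 2 table otherwise. Illustrative call path
 * (sketch only):
 *
 *	sec_skcipher_encrypt()
 *	  -> sec_skcipher_crypto()
 *	    -> ctx->req_op->process()   which is sec_process() in both tables
 */
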
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (!ctx->qps)
		return 0;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return 0;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

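/*
 * Note: sec_aead_init() brings up the base ctx, then the auth ctx, then the
 * cipher ctx, and its error labels unwind in reverse order; sec_aead_exit()
 * mirrors that same reverse order (cipher, auth, base).
 */
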
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret && ret != -ENODEV) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(a_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->hash_tfm);
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		crypto_free_shash(ctx->a_ctx.hash_tfm);
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret && ret != -ENODEV) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CTR:
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

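/*
 * Length rules enforced above, per cipher mode:
 *   XTS     - at least one AES block;
 *   ECB/CBC - a multiple of the AES block size;
 *   CTR     - any length;
 *   others  - rejected.
 */
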
static int sec_skcipher_param_check(struct sec_ctx *ctx,
				    struct sec_req *sreq, bool *need_fallback)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
		*need_fallback = true;

	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
	struct device *dev = ctx->dev;
	int ret;

	if (!c_ctx->fbtfm) {
		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* The software fallback runs in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx_dma(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	bool need_fallback = false;
	int ret;

	if (!ctx->qps)
		goto soft_crypto;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->base = &sk_req->base;

	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback || need_fallback))
		goto soft_crypto;

	return ctx->req_op->process(ctx, req);

soft_crypto:
	return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
			     CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = sec_skcipher_ctx_init,\
	.exit = sec_skcipher_ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
}

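/*
 * For illustration, SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
 * AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0) expands to a
 * skcipher algorithm whose .cra_driver_name is "hisi_sec_ecb(aes)", flagged
 * async and needing a fallback, wired to the ctx init/exit, setkey and
 * encrypt/decrypt hooks defined earlier in this file.
 */
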
static struct sec_skcipher sec_skciphers[] = {
	{
		.alg_msk = BIT(0),
		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(1),
		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(2),
		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(3),
		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(12),
		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(13),
		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(14),
		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(23),
		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(24),
		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
					DES3_EDE_BLOCK_SIZE),
	},
};

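/*
 * Each entry's alg_msk is a capability bit: sec_register_skcipher() further
 * below registers only the algorithms whose bit is set in the mask obtained
 * via sec_get_alg_bitmap(), so a given SEC revision may expose just a subset
 * of this table.
 */
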
static int aead_iv_demension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}

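/*
 * CCM sanity check above: iv[0] holds L - 1, where L is the size in bytes of
 * the CCM length field, so the accepted range is 2 <= L <= 8, and for the
 * short L values the plaintext length must fit into L bytes.
 */
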
static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t sz = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret;

	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
		return -EINVAL;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
		     req->assoclen > SEC_MAX_AAD_LEN))
		return -EINVAL;

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
			return -EINVAL;

		ret = aead_iv_demension_check(req);
		if (unlikely(ret))
			return -EINVAL;
	} else if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sz & WORD_MASK))
			return -EINVAL;
		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
			return -EINVAL;
	} else if (c_mode == SEC_CMODE_GCM) {
		if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
			return -EINVAL;
	}

	return 0;
}

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
		     sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead cbc mode input data length error!\n");
		return -EINVAL;
	}

	/* Support AES or SM4 */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
		*need_fallback = true;
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}

static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct aead_request *subreq;
	int ret;

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);
	aead_request_free(subreq);

	return ret;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx_dma(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	size_t sz = crypto_aead_authsize(tfm);
	bool need_fallback = false;
	int ret;

	if (!ctx->qps)
		goto soft_crypto;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->base = &a_req->base;
	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);

	ret = sec_aead_param_check(ctx, req, &need_fallback);
	if (unlikely(ret)) {
		if (need_fallback)
			goto soft_crypto;
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);

soft_crypto:
	return sec_aead_soft_crypto(ctx, a_req, encrypt);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
	ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
			     CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

static struct sec_aead sec_aeads[] = {
	{
		.alg_msk = BIT(6),
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(7),
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(17),
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(18),
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(43),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(44),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(45),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}

int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;
	int ret = 0;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
				      SEC_DRV_ALG_BITMAP_LOW_TB);

	mutex_lock(&sec_algs_lock);
	if (sec_available_devs) {
		sec_available_devs++;
		goto unlock;
	}

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		goto unlock;

	ret = sec_register_aead(alg_mask);
	if (ret)
		goto unreg_skcipher;

	sec_available_devs++;
	mutex_unlock(&sec_algs_lock);

	return 0;

unreg_skcipher:
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
unlock:
	mutex_unlock(&sec_algs_lock);
	return ret;
}

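/*
 * sec_available_devs acts as a reference count under sec_algs_lock: the
 * algorithms are registered only when the first SEC device comes up and
 * (see below) unregistered only when the last one goes away.
 */
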
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
				      SEC_DRV_ALG_BITMAP_LOW_TB);

	mutex_lock(&sec_algs_lock);
	if (--sec_available_devs)
		goto unlock;

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

unlock:
	mutex_unlock(&sec_algs_lock);
}