Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/hisilicon/zip/zip_crypto.c
51347 views
1
// SPDX-License-Identifier: GPL-2.0
2
/* Copyright (c) 2019 HiSilicon Limited. */
3
#include <crypto/internal/acompress.h>
4
#include <linux/bitfield.h>
5
#include <linux/bitmap.h>
6
#include <linux/dma-mapping.h>
7
#include <linux/scatterlist.h>
8
#include "zip.h"
9
10
/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)	/* completion status of a BD */
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
#define HZIP_SQE_TYPE_M GENMASK(31, 28)	/* descriptor layout version */
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)	/* compression algorithm selector */
#define HZIP_ALG_TYPE_DEFLATE 0x01
#define HZIP_ALG_TYPE_LZ4 0x04
#define HZIP_BUF_TYPE_M GENMASK(11, 8)	/* buffer addressing mode */
#define HZIP_SGL 0x1			/* buf_type: scatter-gather list */
#define HZIP_WIN_SIZE_M GENMASK(15, 12)	/* history window size */
#define HZIP_16K_WINSZ 0x2		/* 16K window */

#define HZIP_ALG_PRIORITY 300		/* crypto alg priority vs. sw impls */
#define HZIP_SGL_SGE_NR 10		/* default SGEs per hardware SGL */

/* device capability bits, tested via hisi_zip_alg_support() */
#define HZIP_ALG_DEFLATE GENMASK(5, 4)
#define HZIP_ALG_LZ4 BIT(8)
31
32
/* Serializes algorithm (un)registration across multiple devices. */
static DEFINE_MUTEX(zip_algs_lock);
/* Count of probed devices sharing the registered algs; under zip_algs_lock. */
static unsigned int zip_available_devs;
34
35
/* Hardware direction field: 0 = compress, 1 = decompress. */
enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

/* Indexes into hisi_zip_ctx->qp_ctx[]: one queue pair per direction. */
enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};
45
46
/* Recover the 64-bit request tag stashed in dw26/dw27 by the fill_tag op. */
#define GET_REQ_FROM_SQE(sqe) ((u64)(sqe)->dw26 | (u64)(sqe)->dw27 << 32)
/* Map a crypto algorithm name to its hardware request type (0 if unknown). */
#define COMP_NAME_TO_TYPE(alg_name) \
(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : \
(!strcmp((alg_name), "lz4") ? HZIP_ALG_TYPE_LZ4 : 0))
50
51
/* Per-request state cached while a hardware descriptor is in flight. */
struct hisi_zip_req {
	struct acomp_req *req;		/* originating acomp request */
	struct hisi_acc_hw_sgl *hw_src;	/* hw SGL mapped from req->src */
	struct hisi_acc_hw_sgl *hw_dst;	/* hw SGL mapped from req->dst */
	dma_addr_t dma_src;		/* DMA address of hw_src */
	dma_addr_t dma_dst;		/* DMA address of hw_dst */
	struct hisi_zip_qp_ctx *qp_ctx;	/* owning queue-pair context */
	u16 req_id;			/* slot index in the request queue */
};
60
61
/* Fixed-size cache of in-flight requests for one queue pair. */
struct hisi_zip_req_q {
	struct hisi_zip_req *q;		/* array of @size request slots */
	unsigned long *req_bitmap;	/* occupancy bitmap for the slots */
	spinlock_t req_lock;		/* protects req_bitmap */
	u16 size;			/* number of slots (= qp sq_depth) */
};
67
68
/* Per-direction queue-pair context (one for compress, one for decompress). */
struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;			/* hardware queue pair */
	struct hisi_zip_req_q req_q;		/* in-flight request cache */
	struct hisi_acc_sgl_pool *sgl_pool;	/* pool of hardware SGLs */
	struct hisi_zip *zip_dev;		/* owning ZIP device */
	struct hisi_zip_ctx *ctx;		/* parent transform context */
	u8 req_type;				/* hw request type (deflate/lz4) */
};
76
77
/* Ops for filling and parsing one hardware SQE layout. */
struct hisi_zip_sqe_ops {
	u8 sqe_type;	/* value written into the dw7 SQE-type field */
	/* fill source/destination DMA addresses */
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	/* fill input length and available output size */
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	/* select the buffer addressing mode (e.g. HZIP_SGL) */
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	/* select the compression algorithm */
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	/* select the history window size */
	void (*fill_win_size)(struct hisi_zip_sqe *sqe, u8 win_size);
	/* stash a tag so the completion callback can find the request */
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	/* set the SQE layout type */
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	/* read the BD completion status */
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	/* read the number of bytes produced */
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};
89
90
/* Transform context: one queue-pair context per direction plus SQE ops. */
struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
	bool fallback;	/* true when hw setup failed; use sw fallback only */
};
95
96
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
97
{
98
int ret;
99
u16 n;
100
101
if (!val)
102
return -EINVAL;
103
104
ret = kstrtou16(val, 10, &n);
105
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
106
return -EINVAL;
107
108
return param_set_ushort(val, kp);
109
}
110
111
/* Param ops pairing the validating setter with the stock ushort getter. */
static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

/* SGEs per hardware SGL; read-only at runtime (mode 0444). */
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
119
120
/*
 * Run @acomp_req through the software fallback transform.
 * On success the produced length is copied back into the original request.
 */
static int hisi_zip_fallback_do_work(struct acomp_req *acomp_req, bool is_decompress)
{
	ACOMP_FBREQ_ON_STACK(fbreq, acomp_req);
	int ret;

	ret = is_decompress ? crypto_acomp_decompress(fbreq) :
			      crypto_acomp_compress(fbreq);
	if (ret) {
		pr_err("failed to do fallback work, ret=%d\n", ret);
		return ret;
	}

	acomp_req->dlen = fbreq->dlen;
	return 0;
}
137
138
static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
139
struct acomp_req *req)
140
{
141
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
142
struct hisi_zip_req *q = req_q->q;
143
struct hisi_zip_req *req_cache;
144
int req_id;
145
146
spin_lock(&req_q->req_lock);
147
148
req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
149
if (req_id >= req_q->size) {
150
spin_unlock(&req_q->req_lock);
151
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
152
return ERR_PTR(-EAGAIN);
153
}
154
set_bit(req_id, req_q->req_bitmap);
155
156
spin_unlock(&req_q->req_lock);
157
158
req_cache = q + req_id;
159
req_cache->req_id = req_id;
160
req_cache->req = req;
161
req_cache->qp_ctx = qp_ctx;
162
163
return req_cache;
164
}
165
166
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
167
struct hisi_zip_req *req)
168
{
169
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
170
171
spin_lock(&req_q->req_lock);
172
clear_bit(req->req_id, req_q->req_bitmap);
173
spin_unlock(&req_q->req_lock);
174
}
175
176
static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
177
{
178
sqe->source_addr_l = lower_32_bits(req->dma_src);
179
sqe->source_addr_h = upper_32_bits(req->dma_src);
180
sqe->dest_addr_l = lower_32_bits(req->dma_dst);
181
sqe->dest_addr_h = upper_32_bits(req->dma_dst);
182
}
183
184
static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
185
{
186
struct acomp_req *a_req = req->req;
187
188
sqe->input_data_length = a_req->slen;
189
sqe->dest_avail_out = a_req->dlen;
190
}
191
192
/* Replace the buf_type field of dw9, leaving the other bits intact. */
static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_BUF_TYPE_M) |
		   FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
}
200
201
/* Replace the req_type (algorithm) field of dw9, preserving other bits. */
static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_REQ_TYPE_M) |
		   FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
}
209
210
/* Replace the window-size field of dw9, preserving other bits. */
static void hisi_zip_fill_win_size(struct hisi_zip_sqe *sqe, u8 win_size)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_WIN_SIZE_M) |
		   FIELD_PREP(HZIP_WIN_SIZE_M, win_size);
}
218
219
static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
220
{
221
sqe->dw26 = lower_32_bits((u64)req);
222
sqe->dw27 = upper_32_bits((u64)req);
223
}
224
225
/* Replace the SQE-type field of dw7, preserving the other bits. */
static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
{
	sqe->dw7 = (sqe->dw7 & ~HZIP_SQE_TYPE_M) |
		   FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
}
233
234
static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
235
u8 req_type, struct hisi_zip_req *req)
236
{
237
const struct hisi_zip_sqe_ops *ops = ctx->ops;
238
239
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
240
241
ops->fill_addr(sqe, req);
242
ops->fill_buf_size(sqe, req);
243
ops->fill_buf_type(sqe, HZIP_SGL);
244
ops->fill_req_type(sqe, req_type);
245
ops->fill_win_size(sqe, HZIP_16K_WINSZ);
246
ops->fill_tag(sqe, req);
247
ops->fill_sqe_type(sqe, ops->sqe_type);
248
}
249
250
/*
 * Map the request's scatterlists to hardware SGLs, build a descriptor and
 * submit it to the queue pair.
 * Returns -EINPROGRESS on successful submission (completion arrives via
 * hisi_zip_acomp_cb), -EINVAL for empty src/dst, -EAGAIN when the queue
 * is busy, or a mapping error. On any failure both SGLs are unmapped.
 */
static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
			    struct hisi_zip_req *req)
{
	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct acomp_req *a_req = req->req;
	struct hisi_qp *qp = qp_ctx->qp;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe zip_sqe;
	int ret;

	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
		return -EINVAL;

	/* Each slot owns two pool indices: 2*id for src, 2*id+1 for dst. */
	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
						    req->req_id << 1, &req->dma_src,
						    DMA_TO_DEVICE);
	if (IS_ERR(req->hw_src)) {
		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
			PTR_ERR(req->hw_src));
		return PTR_ERR(req->hw_src);
	}

	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
						    (req->req_id << 1) + 1,
						    &req->dma_dst, DMA_FROM_DEVICE);
	if (IS_ERR(req->hw_dst)) {
		ret = PTR_ERR(req->hw_dst);
		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
			ret);
		goto err_unmap_input;
	}

	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp_ctx->req_type, req);

	/* send command to start a task */
	atomic64_inc(&dfx->send_cnt);
	ret = hisi_qp_send(qp, &zip_sqe);
	if (unlikely(ret < 0)) {
		/* Queue full: report -EAGAIN so the caller can retry. */
		atomic64_inc(&dfx->send_busy_cnt);
		ret = -EAGAIN;
		dev_dbg_ratelimited(dev, "failed to send request!\n");
		goto err_unmap_output;
	}

	return -EINPROGRESS;

err_unmap_output:
	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
err_unmap_input:
	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
	return ret;
}
303
304
static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
305
{
306
return sqe->dw3 & HZIP_BD_STATUS_M;
307
}
308
309
static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
310
{
311
return sqe->produced;
312
}
313
314
/*
 * Completion callback invoked by the QM for each finished descriptor.
 * Recovers the request from the tag words, unmaps the DMA buffers,
 * stores the produced length, completes the acomp request and finally
 * releases the request slot.
 */
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_sqe *sqe = data;
	struct hisi_zip_req *req = (struct hisi_zip_req *)GET_REQ_FROM_SQE(sqe);
	struct hisi_zip_qp_ctx *qp_ctx = req->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct device *dev = &qp->qm->pdev->dev;
	struct acomp_req *acomp_req = req->req;
	int err = 0;
	u32 status;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	/* HZIP_NC_ERR is tolerated (presumably incompressible data — confirm in zip.h). */
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);

	acomp_req->dlen = ops->get_dstlen(sqe);

	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	/* Free the slot only after the completion handler has run. */
	hisi_zip_remove_req(qp_ctx, req);
}
346
347
static int hisi_zip_acompress(struct acomp_req *acomp_req)
348
{
349
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
350
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
351
struct hisi_zip_req *req;
352
struct device *dev;
353
int ret;
354
355
if (ctx->fallback)
356
return hisi_zip_fallback_do_work(acomp_req, 0);
357
358
dev = &qp_ctx->qp->qm->pdev->dev;
359
360
req = hisi_zip_create_req(qp_ctx, acomp_req);
361
if (IS_ERR(req))
362
return PTR_ERR(req);
363
364
ret = hisi_zip_do_work(qp_ctx, req);
365
if (unlikely(ret != -EINPROGRESS)) {
366
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
367
hisi_zip_remove_req(qp_ctx, req);
368
}
369
370
return ret;
371
}
372
373
static int hisi_zip_adecompress(struct acomp_req *acomp_req)
374
{
375
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
376
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
377
struct hisi_zip_req *req;
378
struct device *dev;
379
int ret;
380
381
if (ctx->fallback)
382
return hisi_zip_fallback_do_work(acomp_req, 1);
383
384
dev = &qp_ctx->qp->qm->pdev->dev;
385
386
req = hisi_zip_create_req(qp_ctx, acomp_req);
387
if (IS_ERR(req))
388
return PTR_ERR(req);
389
390
ret = hisi_zip_do_work(qp_ctx, req);
391
if (unlikely(ret != -EINPROGRESS)) {
392
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
393
ret);
394
hisi_zip_remove_req(qp_ctx, req);
395
}
396
397
return ret;
398
}
399
400
static int hisi_zip_decompress(struct acomp_req *acomp_req)
401
{
402
return hisi_zip_fallback_do_work(acomp_req, 1);
403
}
404
405
/* SQE accessors for the hisi_zip_sqe layout used here (sqe_type 0x3). */
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
	.sqe_type = 0x3,
	.fill_addr = hisi_zip_fill_addr,
	.fill_buf_size = hisi_zip_fill_buf_size,
	.fill_buf_type = hisi_zip_fill_buf_type,
	.fill_req_type = hisi_zip_fill_req_type,
	.fill_win_size = hisi_zip_fill_win_size,
	.fill_tag = hisi_zip_fill_tag,
	.fill_sqe_type = hisi_zip_fill_sqe_type,
	.get_status = hisi_zip_get_status,
	.get_dstlen = hisi_zip_get_dstlen,
};
417
418
/*
 * Bind one compress and one decompress queue pair (allocated near @node)
 * to @hisi_zip_ctx and select the SQE ops.
 * Returns 0 on success, -ENODEV if queue pairs could not be created.
 */
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	u8 alg_type[HZIP_CTX_Q_NUM];
	struct hisi_zip *hisi_zip;
	int ret, i;

	/* alg_type = 0 for compress, 1 for decompress in hw sqe */
	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		alg_type[i] = i;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node, alg_type);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	/* Both queues come from one device; recover it from the first QM. */
	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		qp_ctx->zip_dev = hisi_zip;
		qp_ctx->req_type = req_type;
		qp_ctx->qp = qps[i];
	}

	hisi_zip_ctx->ops = &hisi_zip_ops;

	return 0;
}
450
451
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
452
{
453
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
454
int i;
455
456
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
457
qps[i] = hisi_zip_ctx->qp_ctx[i].qp;
458
459
hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
460
}
461
462
/*
 * Allocate, for each queue context, a request-slot array and its
 * occupancy bitmap sized to the queue depth. On failure, undo exactly
 * what the earlier iterations allocated.
 */
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			/* First iteration allocated nothing yet. */
			if (i == 0)
				return ret;

			goto err_free_comp_q;
		}
		spin_lock_init(&req_q->req_lock);

		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
				   GFP_KERNEL);
		if (!req_q->q) {
			ret = -ENOMEM;
			/* Free only what this/previous iterations created. */
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}
503
504
static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
505
{
506
int i;
507
508
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
509
kfree(ctx->qp_ctx[i].req_q.q);
510
bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
511
}
512
}
513
514
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
515
{
516
u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
517
struct hisi_zip_qp_ctx *tmp;
518
struct device *dev;
519
int i;
520
521
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
522
tmp = &ctx->qp_ctx[i];
523
dev = &tmp->qp->qm->pdev->dev;
524
tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
525
sgl_sge_nr);
526
if (IS_ERR(tmp->sgl_pool)) {
527
if (i == 1)
528
goto err_free_sgl_pool0;
529
return -ENOMEM;
530
}
531
}
532
533
return 0;
534
535
err_free_sgl_pool0:
536
hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
537
ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
538
return -ENOMEM;
539
}
540
541
static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
542
{
543
int i;
544
545
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
546
hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
547
ctx->qp_ctx[i].sgl_pool);
548
}
549
550
static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
551
void (*fn)(struct hisi_qp *, void *))
552
{
553
int i;
554
555
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
556
ctx->qp_ctx[i].qp->req_cb = fn;
557
}
558
559
/*
 * Transform init: set up hardware queues, request caches and SGL pools.
 * If any step fails, the tfm is switched to the software fallback and 0
 * is still returned so the algorithm stays usable without hardware.
 */
static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct device *dev;
	int ret;

	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
	if (ret) {
		pr_err("failed to init ctx (%d)!\n", ret);
		goto switch_to_soft;
	}

	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;

	ret = hisi_zip_create_req_q(ctx);
	if (ret) {
		dev_err(dev, "failed to create request queue (%d)!\n", ret);
		goto err_ctx_exit;
	}

	ret = hisi_zip_create_sgl_pool(ctx);
	if (ret) {
		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
		goto err_release_req_q;
	}

	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);

	return 0;

err_release_req_q:
	hisi_zip_release_req_q(ctx);
err_ctx_exit:
	hisi_zip_ctx_exit(ctx);
switch_to_soft:
	/* Degrade gracefully: serve requests via the software fallback. */
	ctx->fallback = true;
	return 0;
}
598
599
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
600
{
601
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
602
603
if (ctx->fallback)
604
return;
605
606
hisi_zip_release_sgl_pool(ctx);
607
hisi_zip_release_req_q(ctx);
608
hisi_zip_ctx_exit(ctx);
609
}
610
611
/* DEFLATE acomp algorithm: hardware-offloaded both ways, sw fallback. */
static struct acomp_alg hisi_zip_acomp_deflate = {
	.init = hisi_zip_acomp_init,
	.exit = hisi_zip_acomp_exit,
	.compress = hisi_zip_acompress,
	.decompress = hisi_zip_adecompress,
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "hisi-deflate-acomp",
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_module = THIS_MODULE,
		.cra_priority = HZIP_ALG_PRIORITY,
		.cra_ctxsize = sizeof(struct hisi_zip_ctx),
	}
};
626
627
static int hisi_zip_register_deflate(struct hisi_qm *qm)
628
{
629
int ret;
630
631
if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
632
return 0;
633
634
ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
635
if (ret)
636
dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
637
638
return ret;
639
}
640
641
static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
642
{
643
if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
644
return;
645
646
crypto_unregister_acomp(&hisi_zip_acomp_deflate);
647
}
648
649
/*
 * LZ4 acomp algorithm: compression is hardware-offloaded, decompression
 * always uses the software fallback (hisi_zip_decompress).
 */
static struct acomp_alg hisi_zip_acomp_lz4 = {
	.init = hisi_zip_acomp_init,
	.exit = hisi_zip_acomp_exit,
	.compress = hisi_zip_acompress,
	.decompress = hisi_zip_decompress,
	.base = {
		.cra_name = "lz4",
		.cra_driver_name = "hisi-lz4-acomp",
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_module = THIS_MODULE,
		.cra_priority = HZIP_ALG_PRIORITY,
		.cra_ctxsize = sizeof(struct hisi_zip_ctx),
	}
};
664
665
static int hisi_zip_register_lz4(struct hisi_qm *qm)
666
{
667
int ret;
668
669
if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
670
return 0;
671
672
ret = crypto_register_acomp(&hisi_zip_acomp_lz4);
673
if (ret)
674
dev_err(&qm->pdev->dev, "failed to register to LZ4 (%d)!\n", ret);
675
676
return ret;
677
}
678
679
static void hisi_zip_unregister_lz4(struct hisi_qm *qm)
680
{
681
if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
682
return;
683
684
crypto_unregister_acomp(&hisi_zip_acomp_lz4);
685
}
686
687
/*
 * Register the acomp algorithms on behalf of @qm. The algorithms are
 * shared by all ZIP devices: only the first device actually registers,
 * later devices merely bump the shared refcount.
 */
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&zip_algs_lock);
	if (zip_available_devs) {
		/* Already registered by an earlier device. */
		zip_available_devs++;
		goto unlock;
	}

	ret = hisi_zip_register_deflate(qm);
	if (ret)
		goto unlock;

	ret = hisi_zip_register_lz4(qm);
	if (ret)
		goto unreg_deflate;

	zip_available_devs++;
	mutex_unlock(&zip_algs_lock);

	return 0;

unreg_deflate:
	/* LZ4 registration failed: roll back DEFLATE. */
	hisi_zip_unregister_deflate(qm);
unlock:
	mutex_unlock(&zip_algs_lock);
	return ret;
}
716
717
/*
 * Drop @qm's reference to the shared algorithms; unregister them only
 * when the last ZIP device goes away.
 */
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
	mutex_lock(&zip_algs_lock);
	if (--zip_available_devs)
		goto unlock;

	hisi_zip_unregister_deflate(qm);
	hisi_zip_unregister_lz4(qm);

unlock:
	mutex_unlock(&zip_algs_lock);
}
729
730