Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/hisilicon/zip/zip_crypto.c
26292 views
1
// SPDX-License-Identifier: GPL-2.0
2
/* Copyright (c) 2019 HiSilicon Limited. */
3
#include <crypto/internal/acompress.h>
4
#include <linux/bitfield.h>
5
#include <linux/bitmap.h>
6
#include <linux/dma-mapping.h>
7
#include <linux/scatterlist.h>
8
#include "zip.h"
9
10
/* hisi_zip_sqe dw3: bits [7:0] carry the BD completion status */
#define HZIP_BD_STATUS_M		GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M	GENMASK(23, 0)	/* byte offset into the input SGE */
#define HZIP_SQE_TYPE_M			GENMASK(31, 28)	/* sqe format/type selector */
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M	GENMASK(23, 0)	/* byte offset into the output SGE */
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M			GENMASK(7, 0)	/* request (algorithm) type field */
#define HZIP_ALG_TYPE_DEFLATE		0x01		/* req_type value for deflate */
#define HZIP_BUF_TYPE_M			GENMASK(11, 8)	/* data buffer layout field */
#define HZIP_SGL			0x1		/* buf_type: hardware scatter-gather list */

#define HZIP_ALG_PRIORITY		300	/* cra_priority of the registered alg */
#define HZIP_SGL_SGE_NR			10	/* default SGE count per hardware SGL */

/* Capability bits tested via hisi_zip_alg_support() before (un)registering. */
#define HZIP_ALG_DEFLATE		GENMASK(5, 4)

/* Protects zip_available_devs and algorithm (un)registration. */
static DEFINE_MUTEX(zip_algs_lock);
/* Count of probed ZIP devices; the alg stays registered while non-zero. */
static unsigned int zip_available_devs;
31
/* alg_type values programmed into the qp (see hisi_zip_start_qp()). */
enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

/* Per-tfm qp indices: one queue pair per direction. */
enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};

/* Map a crypto algorithm name to the hardware request type; 0 if unknown. */
#define COMP_NAME_TO_TYPE(alg_name) \
	(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
45
/**
 * struct hisi_zip_req - state cached for one in-flight acomp request.
 * @req: the crypto acomp request being serviced
 * @hw_src: hardware SGL the source scatterlist was mapped to
 * @hw_dst: hardware SGL the destination scatterlist was mapped to
 * @dma_src: DMA address of the mapped source SGL
 * @dma_dst: DMA address of the mapped destination SGL
 * @req_id: slot index in the request cache; also used as the sqe tag
 */
struct hisi_zip_req {
	struct acomp_req *req;
	struct hisi_acc_hw_sgl *hw_src;
	struct hisi_acc_hw_sgl *hw_dst;
	dma_addr_t dma_src;
	dma_addr_t dma_dst;
	u16 req_id;
};

/**
 * struct hisi_zip_req_q - fixed-size request cache for one qp.
 * @q: array of @size request slots
 * @req_bitmap: bitmap of allocated slots
 * @req_lock: protects @req_bitmap and serializes sqe submission
 * @size: number of slots (the qp's submission-queue depth)
 */
struct hisi_zip_req_q {
	struct hisi_zip_req *q;
	unsigned long *req_bitmap;
	spinlock_t req_lock;
	u16 size;
};

/**
 * struct hisi_zip_qp_ctx - per-queue-pair context (one for compression,
 * one for decompression).
 * @qp: the started hardware queue pair
 * @req_q: request cache for this qp
 * @sgl_pool: DMA pool of hardware SGLs for this qp
 * @zip_dev: owning ZIP device (used for DFX counters)
 * @ctx: back-pointer to the enclosing tfm context
 */
struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;
	struct hisi_zip_ctx *ctx;
};

/**
 * struct hisi_zip_sqe_ops - callbacks that build and parse sqes,
 * abstracting the hardware sqe layout.
 */
struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_tag)(struct hisi_zip_sqe *sqe);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};

/* Per-tfm context: one qp context per direction plus the sqe ops table. */
struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
};
86
87
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
88
{
89
int ret;
90
u16 n;
91
92
if (!val)
93
return -EINVAL;
94
95
ret = kstrtou16(val, 10, &n);
96
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
97
return -EINVAL;
98
99
return param_set_ushort(val, kp);
100
}
101
102
/* Module-parameter ops: the custom setter enforces the valid SGE range. */
static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

/* Number of SGEs per hardware SGL; read-only via sysfs (0444). */
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
110
111
/*
 * Reserve a free slot in the qp's request cache and bind @req to it.
 *
 * Returns the cached request on success, or ERR_PTR(-EAGAIN) when every
 * slot is in use (caller may retry later).
 */
static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
						struct acomp_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct hisi_zip_req *q = req_q->q;
	struct hisi_zip_req *req_cache;
	int req_id;

	spin_lock(&req_q->req_lock);

	/*
	 * find_first_zero_bit() and set_bit() must stay under the lock so
	 * concurrent submitters cannot claim the same slot.
	 */
	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
	if (req_id >= req_q->size) {
		spin_unlock(&req_q->req_lock);
		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
		return ERR_PTR(-EAGAIN);
	}
	set_bit(req_id, req_q->req_bitmap);

	spin_unlock(&req_q->req_lock);

	/* The slot is now exclusively ours; initialize it outside the lock. */
	req_cache = q + req_id;
	req_cache->req_id = req_id;
	req_cache->req = req;

	return req_cache;
}
137
138
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
139
struct hisi_zip_req *req)
140
{
141
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
142
143
spin_lock(&req_q->req_lock);
144
clear_bit(req->req_id, req_q->req_bitmap);
145
spin_unlock(&req_q->req_lock);
146
}
147
148
static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
149
{
150
sqe->source_addr_l = lower_32_bits(req->dma_src);
151
sqe->source_addr_h = upper_32_bits(req->dma_src);
152
sqe->dest_addr_l = lower_32_bits(req->dma_dst);
153
sqe->dest_addr_h = upper_32_bits(req->dma_dst);
154
}
155
156
static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
157
{
158
struct acomp_req *a_req = req->req;
159
160
sqe->input_data_length = a_req->slen;
161
sqe->dest_avail_out = a_req->dlen;
162
}
163
164
/* Set the buffer-type field of dw9 without disturbing its other bits. */
static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_BUF_TYPE_M) |
		   FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
}
172
173
/* Set the request-type field of dw9 without disturbing its other bits. */
static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
	sqe->dw9 = (sqe->dw9 & ~HZIP_REQ_TYPE_M) |
		   FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
}
181
182
/*
 * Stash the request-cache index in dw26 so the completion callback can
 * recover the originating request (see hisi_zip_get_tag()).
 */
static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw26 = req->req_id;
}
186
187
/* Set the sqe-type field of dw7 without disturbing its other bits. */
static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
{
	sqe->dw7 = (sqe->dw7 & ~HZIP_SQE_TYPE_M) |
		   FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
}
195
196
/*
 * Build a complete sqe for @req using the ctx's layout-specific ops.
 * The sqe is zeroed first so every unset field is deterministic.
 */
static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
			      u8 req_type, struct hisi_zip_req *req)
{
	const struct hisi_zip_sqe_ops *ops = ctx->ops;

	memset(sqe, 0, sizeof(struct hisi_zip_sqe));

	ops->fill_addr(sqe, req);
	ops->fill_buf_size(sqe, req);
	ops->fill_buf_type(sqe, HZIP_SGL);
	ops->fill_req_type(sqe, req_type);
	ops->fill_tag(sqe, req);
	ops->fill_sqe_type(sqe, ops->sqe_type);
}
210
211
static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
212
struct hisi_zip_req *req)
213
{
214
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
215
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
216
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
217
struct acomp_req *a_req = req->req;
218
struct hisi_qp *qp = qp_ctx->qp;
219
struct device *dev = &qp->qm->pdev->dev;
220
struct hisi_zip_sqe zip_sqe;
221
int ret;
222
223
if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
224
return -EINVAL;
225
226
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
227
req->req_id << 1, &req->dma_src,
228
DMA_TO_DEVICE);
229
if (IS_ERR(req->hw_src)) {
230
dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
231
PTR_ERR(req->hw_src));
232
return PTR_ERR(req->hw_src);
233
}
234
235
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
236
(req->req_id << 1) + 1,
237
&req->dma_dst, DMA_FROM_DEVICE);
238
if (IS_ERR(req->hw_dst)) {
239
ret = PTR_ERR(req->hw_dst);
240
dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
241
ret);
242
goto err_unmap_input;
243
}
244
245
hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
246
247
/* send command to start a task */
248
atomic64_inc(&dfx->send_cnt);
249
spin_lock_bh(&req_q->req_lock);
250
ret = hisi_qp_send(qp, &zip_sqe);
251
spin_unlock_bh(&req_q->req_lock);
252
if (unlikely(ret < 0)) {
253
atomic64_inc(&dfx->send_busy_cnt);
254
ret = -EAGAIN;
255
dev_dbg_ratelimited(dev, "failed to send request!\n");
256
goto err_unmap_output;
257
}
258
259
return -EINPROGRESS;
260
261
err_unmap_output:
262
hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
263
err_unmap_input:
264
hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
265
return ret;
266
}
267
268
/* Retrieve the request-cache index stashed in dw26 by hisi_zip_fill_tag(). */
static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
{
	return sqe->dw26;
}
272
273
/*
 * Extract the BD completion status from dw3. The completion callback
 * treats any value other than 0 or HZIP_NC_ERR as an error.
 */
static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
	return sqe->dw3 & HZIP_BD_STATUS_M;
}
277
278
/* Number of output bytes the hardware produced for this sqe. */
static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}
282
283
/*
 * Completion callback invoked by the QM for each finished sqe.
 *
 * Looks up the originating request via the sqe tag, unmaps both hardware
 * SGLs, propagates the produced output length and completes the acomp
 * request. The cache slot is released last, after the request can no
 * longer be touched here.
 */
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe *sqe = data;
	u32 tag = ops->get_tag(sqe);
	struct hisi_zip_req *req = req_q->q + tag;
	struct acomp_req *acomp_req = req->req;
	int err = 0;
	u32 status;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	/* A status of HZIP_NC_ERR is tolerated: the request completes with 0. */
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);

	acomp_req->dlen = ops->get_dstlen(sqe);

	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	hisi_zip_remove_req(qp_ctx, req);
}
317
318
static int hisi_zip_acompress(struct acomp_req *acomp_req)
319
{
320
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
321
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
322
struct device *dev = &qp_ctx->qp->qm->pdev->dev;
323
struct hisi_zip_req *req;
324
int ret;
325
326
req = hisi_zip_create_req(qp_ctx, acomp_req);
327
if (IS_ERR(req))
328
return PTR_ERR(req);
329
330
ret = hisi_zip_do_work(qp_ctx, req);
331
if (unlikely(ret != -EINPROGRESS)) {
332
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
333
hisi_zip_remove_req(qp_ctx, req);
334
}
335
336
return ret;
337
}
338
339
static int hisi_zip_adecompress(struct acomp_req *acomp_req)
340
{
341
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
342
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
343
struct device *dev = &qp_ctx->qp->qm->pdev->dev;
344
struct hisi_zip_req *req;
345
int ret;
346
347
req = hisi_zip_create_req(qp_ctx, acomp_req);
348
if (IS_ERR(req))
349
return PTR_ERR(req);
350
351
ret = hisi_zip_do_work(qp_ctx, req);
352
if (unlikely(ret != -EINPROGRESS)) {
353
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
354
ret);
355
hisi_zip_remove_req(qp_ctx, req);
356
}
357
358
return ret;
359
}
360
361
static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
362
int alg_type, int req_type)
363
{
364
struct device *dev = &qp->qm->pdev->dev;
365
int ret;
366
367
qp->req_type = req_type;
368
qp->alg_type = alg_type;
369
qp->qp_ctx = qp_ctx;
370
371
ret = hisi_qm_start_qp(qp, 0);
372
if (ret < 0) {
373
dev_err(dev, "failed to start qp (%d)!\n", ret);
374
return ret;
375
}
376
377
qp_ctx->qp = qp;
378
379
return 0;
380
}
381
382
/* Stop the qp first, then return it to the QM's free pool. */
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	hisi_qm_free_qps(&qp_ctx->qp, 1);
}
387
388
/* sqe build/parse callbacks for this hardware's sqe layout (sqe_type 0x3). */
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
	.sqe_type		= 0x3,
	.fill_addr		= hisi_zip_fill_addr,
	.fill_buf_size		= hisi_zip_fill_buf_size,
	.fill_buf_type		= hisi_zip_fill_buf_type,
	.fill_req_type		= hisi_zip_fill_req_type,
	.fill_tag		= hisi_zip_fill_tag,
	.fill_sqe_type		= hisi_zip_fill_sqe_type,
	.get_tag		= hisi_zip_get_tag,
	.get_status		= hisi_zip_get_status,
	.get_dstlen		= hisi_zip_get_dstlen,
};
400
401
/*
 * Acquire HZIP_CTX_Q_NUM qps near @node and start one per direction
 * (index doubles as the hardware alg_type: 0 = comp, 1 = decomp).
 *
 * Returns 0 on success. On failure every qp started so far is stopped
 * and all acquired qps are freed.
 */
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	struct hisi_zip *hisi_zip;
	int ret, i, j;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	/* All qps come from the same device; recover it from the first qm. */
	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		/* alg_type = 0 for compress, 1 for decompress in hw sqe */
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
		if (ret) {
			/* Stop the qps already started, then free them all. */
			for (j = i - 1; j >= 0; j--)
				hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);

			hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
			return ret;
		}

		qp_ctx->zip_dev = hisi_zip;
	}

	hisi_zip_ctx->ops = &hisi_zip_ops;

	return 0;
}
436
437
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
438
{
439
int i;
440
441
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
442
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
443
}
444
445
/*
 * Allocate the per-qp request caches: a free-slot bitmap and an array of
 * hisi_zip_req, both sized to the qp's submission-queue depth.
 *
 * On failure, everything allocated by earlier iterations is unwound; the
 * labels chain so each entry frees progressively older allocations.
 */
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			/* First iteration: nothing allocated yet to unwind. */
			if (i == 0)
				return ret;

			goto err_free_comp_q;
		}
		spin_lock_init(&req_q->req_lock);

		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
				   GFP_KERNEL);
		if (!req_q->q) {
			ret = -ENOMEM;
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

/* Unwind order: decomp bitmap -> comp queue -> comp bitmap. */
err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}
486
487
static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
488
{
489
int i;
490
491
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
492
kfree(ctx->qp_ctx[i].req_q.q);
493
bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
494
}
495
}
496
497
/*
 * Create one DMA SGL pool per qp. Each request uses two SGLs (src + dst),
 * hence q_depth << 1 pool entries, each with sgl_sge_nr SGEs.
 */
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_qp_ctx *tmp;
	struct device *dev;
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		tmp = &ctx->qp_ctx[i];
		dev = &tmp->qp->qm->pdev->dev;
		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
							 sgl_sge_nr);
		if (IS_ERR(tmp->sgl_pool)) {
			/* Second pool failed: release the first one. */
			if (i == 1)
				goto err_free_sgl_pool0;
			return -ENOMEM;
		}
	}

	return 0;

err_free_sgl_pool0:
	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
	return -ENOMEM;
}
523
524
static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
525
{
526
int i;
527
528
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
529
hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
530
ctx->qp_ctx[i].sgl_pool);
531
}
532
533
static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
534
void (*fn)(struct hisi_qp *, void *))
535
{
536
int i;
537
538
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
539
ctx->qp_ctx[i].qp->req_cb = fn;
540
}
541
542
/*
 * Per-tfm init: acquire and start the qps, allocate the request caches
 * and SGL pools, then install the completion callback. Each step unwinds
 * the previous ones on failure.
 */
static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct device *dev;
	int ret;

	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
	if (ret) {
		pr_err("failed to init ctx (%d)!\n", ret);
		return ret;
	}

	/* The device is only reachable once a qp has been bound above. */
	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;

	ret = hisi_zip_create_req_q(ctx);
	if (ret) {
		dev_err(dev, "failed to create request queue (%d)!\n", ret);
		goto err_ctx_exit;
	}

	ret = hisi_zip_create_sgl_pool(ctx);
	if (ret) {
		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
		goto err_release_req_q;
	}

	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);

	return 0;

err_release_req_q:
	hisi_zip_release_req_q(ctx);
err_ctx_exit:
	hisi_zip_ctx_exit(ctx);
	return ret;
}
579
580
/*
 * Per-tfm teardown, the reverse of hisi_zip_acomp_init(): the callback is
 * cleared before the backing resources are released.
 */
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	hisi_zip_set_acomp_cb(ctx, NULL);
	hisi_zip_release_sgl_pool(ctx);
	hisi_zip_release_req_q(ctx);
	hisi_zip_ctx_exit(ctx);
}
589
590
/* "deflate" acomp algorithm backed by the HiSilicon ZIP accelerator. */
static struct acomp_alg hisi_zip_acomp_deflate = {
	.init			= hisi_zip_acomp_init,
	.exit			= hisi_zip_acomp_exit,
	.compress		= hisi_zip_acompress,
	.decompress		= hisi_zip_adecompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "hisi-deflate-acomp",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_module		= THIS_MODULE,
		.cra_priority		= HZIP_ALG_PRIORITY,
		.cra_ctxsize		= sizeof(struct hisi_zip_ctx),
	}
};
604
605
static int hisi_zip_register_deflate(struct hisi_qm *qm)
606
{
607
int ret;
608
609
if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
610
return 0;
611
612
ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
613
if (ret)
614
dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
615
616
return ret;
617
}
618
619
static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
620
{
621
if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
622
return;
623
624
crypto_unregister_acomp(&hisi_zip_acomp_deflate);
625
}
626
627
/*
 * Called for each probed ZIP device. The algorithm is registered only by
 * the first device; later devices just increment the refcount.
 */
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&zip_algs_lock);
	/* Already registered by an earlier device: only bump the count. */
	if (zip_available_devs++)
		goto unlock;

	ret = hisi_zip_register_deflate(qm);
	if (ret)
		zip_available_devs--;	/* registration failed: undo the count */

unlock:
	mutex_unlock(&zip_algs_lock);
	return ret;
}
643
644
/* Unregister the algorithm only when the last ZIP device goes away. */
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
	mutex_lock(&zip_algs_lock);
	/* Other devices still present: keep the algorithm registered. */
	if (--zip_available_devs)
		goto unlock;

	hisi_zip_unregister_deflate(qm);

unlock:
	mutex_unlock(&zip_algs_lock);
}
655
656