Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
26285 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/* Copyright (C) 2020 Marvell. */
3
4
#include "otx2_cpt_common.h"
5
#include "otx2_cptvf.h"
6
#include "otx2_cptlf.h"
7
#include "otx2_cptvf_algs.h"
8
#include "cn10k_cpt.h"
9
#include <rvu_reg.h>
10
11
#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
12
13
/*
 * cptvf_enable_pfvf_mbox_intrs() - enable the VF side of the PF<->VF
 * mailbox interrupt.
 * @cptvf: CPT VF device private data.
 *
 * Any stale pending interrupt is acknowledged first so that an old event
 * does not fire the moment the line is unmasked, then the interrupt is
 * enabled through the write-1-to-set enable register.
 */
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);

	/* Enable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}
23
24
/*
 * cptvf_disable_pfvf_mbox_intrs() - disable the VF side of the PF<->VF
 * mailbox interrupt.
 * @cptvf: CPT VF device private data.
 *
 * Mirror image of cptvf_enable_pfvf_mbox_intrs(): the interrupt is masked
 * via the write-1-to-clear enable register first, then any interrupt that
 * was already latched is acknowledged.
 */
static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Disable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);
}
34
35
/*
 * cptvf_register_interrupts() - allocate the VF's MSI-X vectors and bring
 * up the PF<->VF mailbox interrupt.
 * @cptvf: CPT VF device private data.
 *
 * Allocates exactly as many MSI-X vectors as the device advertises,
 * installs a devm-managed handler on the mailbox vector, unmasks the
 * mailbox interrupt and finally sends a READY message to the PF to verify
 * the channel works.
 *
 * Return: 0 on success; -EPROBE_DEFER when the PF does not answer the
 * READY message (probe will be retried once the PF is up); another
 * negative errno on failure.
 */
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
	int ret, irq;
	int num_vec;

	num_vec = pci_msix_vec_count(cptvf->pdev);
	if (num_vec <= 0)
		return -EINVAL;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cptvf->pdev->dev,
			"Request for %d msix vectors failed\n", num_vec);
		return ret;
	}
	irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
	/* Register VF<=>PF mailbox interrupt handler */
	ret = devm_request_irq(&cptvf->pdev->dev, irq,
			       otx2_cptvf_pfvf_mbox_intr, 0,
			       "CPTPFVF Mbox", cptvf);
	if (ret)
		return ret;
	/* Enable PF-VF mailbox interrupts */
	cptvf_enable_pfvf_mbox_intrs(cptvf);

	/*
	 * Probe the mailbox channel; no reply means the PF driver is not
	 * ready yet, so undo the interrupt enable and ask for a re-probe.
	 */
	ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
	if (ret) {
		dev_warn(&cptvf->pdev->dev,
			 "PF not responding to mailbox, deferring probe\n");
		cptvf_disable_pfvf_mbox_intrs(cptvf);
		return -EPROBE_DEFER;
	}
	return 0;
}
71
72
/*
 * cptvf_pfvf_mbox_init() - set up the PF<->VF mailbox infrastructure.
 * @cptvf: CPT VF device private data.
 *
 * Creates the ordered workqueue that processes mailbox messages, locates
 * (or maps) the mailbox memory region, initializes the mbox object and
 * the bounce buffer, and hooks up the deferred-work handler.
 *
 * Return: 0 on success, negative errno on failure; all partially acquired
 * resources are released on the error paths.
 */
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;
	int ret;

	/* Ordered + HIGHPRI: mailbox replies are processed one at a time */
	cptvf->pfvf_mbox_wq =
		alloc_ordered_workqueue("cpt_pfvf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptvf->pfvf_mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		cptvf->pfvf_mbox_base = cptvf->reg_base +
					CN10K_CPT_VF_MBOX_REGION;
	} else {
		offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
		size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
		/* Map PF-VF mailbox memory */
		cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
							size);
		if (!cptvf->pfvf_mbox_base) {
			dev_err(&pdev->dev, "Unable to map BAR4\n");
			ret = -ENOMEM;
			goto free_wqe;
		}
	}

	ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
			     pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
	if (ret)
		goto free_wqe;

	ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
	if (ret)
		goto destroy_mbox;

	INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
	return 0;

destroy_mbox:
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	return ret;
}
121
122
/*
 * cptvf_pfvf_mbox_destroy() - tear down the PF<->VF mailbox.
 * @cptvf: CPT VF device private data.
 *
 * The workqueue is destroyed (flushing any queued handler work) before
 * the mbox object it operates on is destroyed.
 */
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
}
127
128
/*
 * cptlf_work_handler() - tasklet entry point for LF completion handling.
 * @data: the otx2_cptlf_wqe this tasklet was initialized with, passed as
 *        an integer per the tasklet API.
 *
 * Simply forwards to the common post-processing routine.
 */
static void cptlf_work_handler(unsigned long data)
{
	otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}
132
133
static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
134
{
135
int i;
136
137
for (i = 0; i < lfs->lfs_num; i++) {
138
if (!lfs->lf[i].wqe)
139
continue;
140
141
tasklet_kill(&lfs->lf[i].wqe->work);
142
kfree(lfs->lf[i].wqe);
143
lfs->lf[i].wqe = NULL;
144
}
145
}
146
147
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
148
{
149
struct otx2_cptlf_wqe *wqe;
150
int i, ret = 0;
151
152
for (i = 0; i < lfs->lfs_num; i++) {
153
wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
154
if (!wqe) {
155
ret = -ENOMEM;
156
goto cleanup_tasklet;
157
}
158
159
tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
160
wqe->lfs = lfs;
161
wqe->lf_num = i;
162
lfs->lf[i].wqe = wqe;
163
}
164
return 0;
165
166
cleanup_tasklet:
167
cleanup_tasklet_work(lfs);
168
return ret;
169
}
170
171
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
172
{
173
int i;
174
175
for (i = 0; i < lfs->lfs_num; i++) {
176
kfree(lfs->lf[i].pqueue.head);
177
lfs->lf[i].pqueue.head = NULL;
178
}
179
}
180
181
static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
182
{
183
int size, ret, i;
184
185
if (!lfs->lfs_num)
186
return -EINVAL;
187
188
for (i = 0; i < lfs->lfs_num; i++) {
189
lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
190
size = lfs->lf[i].pqueue.qlen *
191
sizeof(struct otx2_cpt_pending_entry);
192
193
lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
194
if (!lfs->lf[i].pqueue.head) {
195
ret = -ENOMEM;
196
goto error;
197
}
198
199
/* Initialize spin lock */
200
spin_lock_init(&lfs->lf[i].pqueue.lock);
201
}
202
return 0;
203
204
error:
205
free_pending_queues(lfs);
206
return ret;
207
}
208
209
/*
 * lf_sw_cleanup() - undo lf_sw_init(): kill the per-LF tasklets, then
 * free the pending queues.
 * @lfs: attached LFs information.
 */
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
	cleanup_tasklet_work(lfs);
	free_pending_queues(lfs);
}
214
215
static int lf_sw_init(struct otx2_cptlfs_info *lfs)
216
{
217
int ret;
218
219
ret = alloc_pending_queues(lfs);
220
if (ret) {
221
dev_err(&lfs->pdev->dev,
222
"Allocating pending queues failed\n");
223
return ret;
224
}
225
ret = init_tasklet_work(lfs);
226
if (ret) {
227
dev_err(&lfs->pdev->dev,
228
"Tasklet work init failed\n");
229
goto pending_queues_free;
230
}
231
return 0;
232
233
pending_queues_free:
234
free_pending_queues(lfs);
235
return ret;
236
}
237
238
/*
 * cptvf_lf_shutdown() - tear down the CPT LFs in the reverse order of
 * cptvf_lf_init().
 * @lfs: attached LFs information.
 *
 * Marks the LFs as in-reset first so no new work is issued, then unwinds
 * interrupts, crypto registration, software state and queues before
 * asking the PF to detach the resources. The statement order is part of
 * the contract and must not be changed.
 */
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
	atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);

	/* Remove interrupts affinity */
	otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queue */
	otx2_cptlf_disable_iqueues(lfs);
	/* Unregister crypto algorithms */
	otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
	/* Unregister LFs interrupts */
	otx2_cptlf_unregister_misc_interrupts(lfs);
	otx2_cptlf_unregister_done_interrupts(lfs);
	/* Cleanup LFs software side */
	lf_sw_cleanup(lfs);
	/* Free instruction queues */
	otx2_cpt_free_instruction_queues(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
	lfs->lfs_num = 0;
}
259
260
/*
 * cptvf_lf_init() - attach and bring up the CPT LFs for this VF.
 * @cptvf: CPT VF device private data.
 *
 * Queries the PF (over the mailbox) for the symmetric and asymmetric
 * crypto engine group numbers and the per-VF LF limit, attaches the LFs,
 * sets up their interrupts, affinity and software state, and finally
 * registers the kernel crypto algorithms. On any failure the steps done
 * so far are unwound via the goto ladder at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	struct device *dev = &cptvf->pdev->dev;
	int ret, lfs_num;
	u8 eng_grp_msk;

	/* Get engine group number for symmetric crypto */
	cptvf->lfs.kcrypto_se_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
	if (ret)
		return ret;

	/* The mbox handler fills in the group number; "still invalid" means
	 * the PF has no SE group configured.
	 */
	if (cptvf->lfs.kcrypto_se_eng_grp_num ==
	    OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev,
			"Symmetric Engine group for crypto not available\n");
		return -ENOENT;
	}

	/* Get engine group number for asymmetric crypto */
	cptvf->lfs.kcrypto_ae_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_AE_TYPES);
	if (ret)
		return ret;

	if (cptvf->lfs.kcrypto_ae_eng_grp_num ==
	    OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev,
			"Asymmetric Engine group for crypto not available\n");
		return -ENOENT;
	}

	eng_grp_msk = BIT(cptvf->lfs.kcrypto_se_eng_grp_num) |
		      BIT(cptvf->lfs.kcrypto_ae_eng_grp_num);

	ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
	if (ret)
		return ret;

	lfs_num = cptvf->lfs.kvf_limits;

	ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
			      lfs_num);
	if (ret)
		return ret;

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Initialize LFs software side */
	ret = lf_sw_init(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register LFs interrupts */
	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	ret = otx2_cptlf_register_done_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	/* Set interrupts affinity */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_intr;

	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	/* Register crypto algorithms */
	ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
	if (ret) {
		dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
		goto disable_irqs;
	}
	return 0;

disable_irqs:
	otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
	otx2_cptlf_unregister_misc_interrupts(lfs);
	otx2_cptlf_unregister_done_interrupts(lfs);
cleanup_lf_sw:
	lf_sw_cleanup(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);

	return ret;
}
352
353
/*
 * otx2_cptvf_probe() - PCI probe for a CPT virtual function.
 * @pdev: the CPT VF PCI device.
 * @ent:  matched entry from otx2_cptvf_id_table (unused).
 *
 * Brings the device up in the order: PCI enable + DMA mask + regions,
 * BAR mapping, capability detection, PF<->VF mailbox, interrupts,
 * capability query, LMTST setup (cn10k) and finally LF initialization.
 * Most resources are devm-managed; the goto ladder unwinds the rest.
 *
 * Return: 0 on success, negative errno (possibly -EPROBE_DEFER from the
 * mailbox READY handshake) on failure.
 */
static int otx2_cptvf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptvf_dev *cptvf;
	int ret;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	/* 48-bit DMA: the device's addressing limit for this hardware */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}

	ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME);
	if (ret) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	/* Map VF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!cptvf->reg_base) {
		ret = -ENOMEM;
		dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret);
		goto clear_drvdata;
	}

	otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);

	/* Initialize PF<=>VF mailbox */
	ret = cptvf_pfvf_mbox_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Register interrupts */
	ret = cptvf_register_interrupts(cptvf);
	if (ret)
		goto destroy_pfvf_mbox;

	cptvf->blkaddr = BLKADDR_CPT0;

	cptvf_hw_ops_get(cptvf);

	otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base,
				&cptvf->pfvf_mbox, cptvf->blkaddr);

	ret = otx2_cptvf_send_caps_msg(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
		goto unregister_interrupts;
	}
	/* Bit 35 of the SE capability word selects the SG v2 descriptor
	 * format; NOTE(review): bit meaning taken from hardware caps, not
	 * visible in this file.
	 */
	if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
		cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;

	ret = cn10k_cptvf_lmtst_init(cptvf);
	if (ret)
		goto unregister_interrupts;

	/* Initialize CPT LFs */
	ret = cptvf_lf_init(cptvf);
	if (ret)
		goto free_lmtst;

	return 0;

free_lmtst:
	cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
unregister_interrupts:
	cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
	cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}
442
443
/*
 * otx2_cptvf_remove() - PCI remove callback, reverse of otx2_cptvf_probe().
 * @pdev: the CPT VF PCI device.
 *
 * LFs are shut down first (stops all crypto traffic), then the mailbox
 * interrupt and mbox itself, and finally the cn10k LMTST memory.
 * devm-managed resources (IRQ handler, BAR mappings, cptvf itself) are
 * released automatically afterwards.
 */
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT VF device.\n");
		return;
	}
	cptvf_lf_shutdown(&cptvf->lfs);
	/* Disable PF-VF mailbox interrupt */
	cptvf_disable_pfvf_mbox_intrs(cptvf);
	/* Destroy PF-VF mbox */
	cptvf_pfvf_mbox_destroy(cptvf);
	/* Free LMTST memory */
	cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
	pci_set_drvdata(pdev, NULL);
}
460
461
/* Supported devices: OcteonTX2 (CN9xxx) and CN10K CPT VF device IDs */
static const struct pci_device_id otx2_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
	{PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, } /* end of table */
};

static struct pci_driver otx2_cptvf_pci_driver = {
	.name = OTX2_CPTVF_DRV_NAME,
	.id_table = otx2_cptvf_id_table,
	.probe = otx2_cptvf_probe,
	.remove = otx2_cptvf_remove,
};

module_pci_driver(otx2_cptvf_pci_driver);

/* Symbols shared with the companion PF driver live in this namespace */
MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
483
484