// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME "rvu_cptpf"
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"

/* Microcode revision IDs used to select engine-group firmware */
#define CPT_UC_RID_CN9K_B0 1
#define CPT_UC_RID_CN10K_A 4
#define CPT_UC_RID_CN10K_B 5
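
/*
 * RVU exposes per-VF interrupt state through pairs of 64-bit registers:
 * index 0 covers VFs 0-63 and index 1 covers VFs 64-127. The interrupt
 * helpers below therefore split their work at the 64-VF boundary.
 */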

static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                        int num_vfs)
{
        int ena_bits;

        /* Clear any pending interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

        /* Enable VF interrupts for VFs from 0 to 63 */
        ena_bits = ((num_vfs - 1) % 64);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
                         GENMASK_ULL(ena_bits, 0));

        if (num_vfs > 64) {
                /* Enable VF interrupts for VFs from 64 to 127 */
                ena_bits = num_vfs - 64 - 1;
                otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
                                 GENMASK_ULL(ena_bits, 0));
        }
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
{
        int vector;

        /* Disable VF-PF interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
        /* Clear any pending interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        free_irq(vector, cptpf);

        if (num_vfs > 64) {
                otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
                vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
                free_irq(vector, cptpf);
        }
}
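
/*
 * Each VF also signals two per-VF events to the PF: FLR, raised when a
 * VF undergoes function level reset, and ME (master enable), raised
 * when a VF toggles its bus master enable bit (e.g. on VF driver load
 * or unload).
 */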

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
{
        /* Clear FLR interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
                         INTR_MASK(num_vfs));

        /* Enable VF FLR interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
        /* Clear ME interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
                         INTR_MASK(num_vfs));
        /* Enable VF ME interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

        if (num_vfs <= 64)
                return;

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
                         INTR_MASK(num_vfs - 64));
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
                         INTR_MASK(num_vfs - 64));
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                                          int num_vfs)
{
        int vector;

        /* Disable VF FLR interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
        free_irq(vector, cptpf);

        /* Disable VF ME interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
        free_irq(vector, cptpf);

        if (num_vfs <= 64)
                return;

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
        free_irq(vector, cptpf);

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
        free_irq(vector, cptpf);
}
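
/*
 * FLR handling is split between the hard IRQ handler and this work
 * handler: cptpf_vf_flr_intr() queues a per-VF work item and masks the
 * VF's FLR interrupt; the work handler then notifies the AF via a
 * MBOX_MSG_VF_FLR message and, once the AF has acknowledged it, clears
 * the VF's transaction pending bit and unmasks the interrupt.
 */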

static void cptpf_flr_wq_handler(struct work_struct *work)
{
        struct cptpf_flr_work *flr_work;
        struct otx2_cptpf_dev *pf;
        struct mbox_msghdr *req;
        struct otx2_mbox *mbox;
        int vf, reg = 0;

        flr_work = container_of(work, struct cptpf_flr_work, work);
        pf = flr_work->pf;
        mbox = &pf->afpf_mbox;

        vf = flr_work - pf->flr_work;

        mutex_lock(&pf->lock);
        req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
                                      sizeof(struct msg_rsp));
        if (!req) {
                mutex_unlock(&pf->lock);
                return;
        }

        req->sig = OTX2_MBOX_REQ_SIG;
        req->id = MBOX_MSG_VF_FLR;
        req->pcifunc &= RVU_PFVF_FUNC_MASK;
        req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

        otx2_cpt_send_mbox_msg(mbox, pf->pdev);
        if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
                if (vf >= 64) {
                        reg = 1;
                        vf = vf - 64;
                }
                /* Clear transaction pending register */
                otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
                /* Re-enable the VF FLR interrupt masked in the IRQ handler */
                otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
        }
        mutex_unlock(&pf->lock);
}

static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
        int reg, dev, vf, start_vf, num_reg = 1;
        struct otx2_cptpf_dev *cptpf = arg;
        u64 intr;

        if (cptpf->max_vfs > 64)
                num_reg = 2;

        for (reg = 0; reg < num_reg; reg++) {
                intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                       RVU_PF_VFFLR_INTX(reg));
                if (!intr)
                        continue;
                start_vf = 64 * reg;
                for (vf = 0; vf < 64; vf++) {
                        if (!(intr & BIT_ULL(vf)))
                                continue;
                        dev = vf + start_vf;
                        queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
                        /* Clear interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
                        /* Disable the interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFFLR_INT_ENA_W1CX(reg),
                                         BIT_ULL(vf));
                }
        }
        return IRQ_HANDLED;
}

static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
        struct otx2_cptpf_dev *cptpf = arg;
        int reg, vf, num_reg = 1;
        u64 intr;

        if (cptpf->max_vfs > 64)
                num_reg = 2;

        for (reg = 0; reg < num_reg; reg++) {
                intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                       RVU_PF_VFME_INTX(reg));
                if (!intr)
                        continue;
                for (vf = 0; vf < 64; vf++) {
                        if (!(intr & BIT_ULL(vf)))
                                continue;
                        /* Clear transaction pending bit */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
                        /* Clear interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
                }
        }
        return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
                                       int num_vfs)
{
        cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
        cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        struct pci_dev *pdev = cptpf->pdev;
        struct device *dev = &pdev->dev;
        int ret, vector;

        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        /* Register VF-PF mailbox interrupt handler */
        ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
                          cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for PFVF mbox0 irq\n");
                return ret;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
        /* Register VF FLR interrupt handler */
        ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for VFFLR0 irq\n");
                goto free_mbox0_irq;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
        /* Register VF ME interrupt handler */
        ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for VFME0 irq\n");
                goto free_flr0_irq;
        }

        if (num_vfs > 64) {
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
                ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
                                  "CPTVFPF Mbox1", cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for PFVF mbox1 irq\n");
                        goto free_me0_irq;
                }
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
                /* Register VF FLR interrupt handler */
                ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
                                  cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for VFFLR1 irq\n");
                        goto free_mbox1_irq;
                }
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
                /* Register VF ME interrupt handler */
                ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
                                  cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for VFME1 irq\n");
                        goto free_flr1_irq;
                }
        }
        cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
        cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

        return 0;

free_flr1_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
        free_irq(vector, cptpf);
free_mbox1_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
        free_irq(vector, cptpf);
free_me0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
        free_irq(vector, cptpf);
free_flr0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
        free_irq(vector, cptpf);
free_mbox0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        free_irq(vector, cptpf);
        return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
        if (!pf->flr_wq)
                return;
        destroy_workqueue(pf->flr_wq);
        pf->flr_wq = NULL;
        kfree(pf->flr_work);
}

static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        int vf;

        cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
        if (!cptpf->flr_wq)
                return -ENOMEM;

        cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
                                  GFP_KERNEL);
        if (!cptpf->flr_work)
                goto destroy_wq;

        for (vf = 0; vf < num_vfs; vf++) {
                cptpf->flr_work[vf].pf = cptpf;
                INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
        }
        return 0;

destroy_wq:
        destroy_workqueue(cptpf->flr_wq);
        return -ENOMEM;
}
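
/*
 * The VF-PF mailbox region is discovered differently per silicon: CN10K
 * parts advertise a dedicated mailbox address via RVU_PF_VF_MBOX_ADDR,
 * while OcteonTX2 parts place the mailbox behind BAR4, whose address is
 * read from RVU_PF_VF_BAR4_ADDR.
 */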

static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        struct device *dev = &cptpf->pdev->dev;
        u64 vfpf_mbox_base;
        int err, i;

        cptpf->vfpf_mbox_wq =
                alloc_ordered_workqueue("cpt_vfpf_mailbox",
                                        WQ_HIGHPRI | WQ_MEM_RECLAIM);
        if (!cptpf->vfpf_mbox_wq)
                return -ENOMEM;

        /* Map VF-PF mailbox memory */
        if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
                vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
        else
                vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

        if (!vfpf_mbox_base) {
                dev_err(dev, "VF-PF mailbox address not configured\n");
                err = -ENOMEM;
                goto free_wqe;
        }
        cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
                                                MBOX_SIZE * cptpf->max_vfs);
        if (!cptpf->vfpf_mbox_base) {
                dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
                err = -ENOMEM;
                goto free_wqe;
        }
        err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
                             cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
                             num_vfs);
        if (err)
                goto free_wqe;

        for (i = 0; i < num_vfs; i++) {
                cptpf->vf[i].vf_id = i;
                cptpf->vf[i].cptpf = cptpf;
                cptpf->vf[i].intr_idx = i % 64;
                INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
                          otx2_cptpf_vfpf_mbox_handler);
        }
        return 0;

free_wqe:
        destroy_workqueue(cptpf->vfpf_mbox_wq);
        return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
        destroy_workqueue(cptpf->vfpf_mbox_wq);
        otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
        /* Disable AF-PF interrupt */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
                         0x1ULL);
        /* Clear interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}
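
/*
 * The AF-PF mailbox interrupt must be registered and enabled before the
 * READY message is sent, since the AF's response to that message is
 * delivered through this same interrupt.
 */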

static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
        struct pci_dev *pdev = cptpf->pdev;
        struct device *dev = &pdev->dev;
        int ret, irq;

        irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
        /* Register AF-PF mailbox interrupt handler */
        ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
                               "CPTAFPF Mbox", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for PFAF mbox irq\n");
                return ret;
        }
        /* Clear interrupt if any, to avoid spurious interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
        /* Enable AF-PF interrupt */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
                         0x1ULL);

        ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
        if (ret) {
                dev_warn(dev,
                         "AF not responding to mailbox, deferring probe\n");
                cptpf_disable_afpf_mbox_intr(cptpf);
                return -EPROBE_DEFER;
        }
        return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
        struct pci_dev *pdev = cptpf->pdev;
        resource_size_t offset;
        int err;

        cptpf->afpf_mbox_wq =
                alloc_ordered_workqueue("cpt_afpf_mailbox",
                                        WQ_HIGHPRI | WQ_MEM_RECLAIM);
        if (!cptpf->afpf_mbox_wq)
                return -ENOMEM;

        offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
        /* Map AF-PF mailbox memory */
        cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
        if (!cptpf->afpf_mbox_base) {
                dev_err(&pdev->dev, "Unable to map BAR4\n");
                err = -ENOMEM;
                goto error;
        }

        err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
                             pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
        if (err)
                goto error;

        err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
                             pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
        if (err)
                goto mbox_cleanup;

        INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
        INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
        mutex_init(&cptpf->lock);

        return 0;

mbox_cleanup:
        otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
        destroy_workqueue(cptpf->afpf_mbox_wq);
        return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
        destroy_workqueue(cptpf->afpf_mbox_wq);
        otx2_mbox_destroy(&cptpf->afpf_mbox);
        otx2_mbox_destroy(&cptpf->afpf_mbox_up);
}

static ssize_t sso_pf_func_ovrd_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}

static ssize_t sso_pf_func_ovrd_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
        u8 sso_pf_func_ovrd;

        /* The override is only applicable on CN9K B0 silicon */
        if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
                return count;

        if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
                return -EINVAL;

        cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;

        return count;
}

static ssize_t kvf_limits_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
        int lfs_num;
        int ret;

        ret = kstrtoint(buf, 0, &lfs_num);
        if (ret)
                return ret;
        if (lfs_num < 1 || lfs_num > num_online_cpus()) {
                dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
                        lfs_num, num_online_cpus());
                return -EINVAL;
        }
        cptpf->kvf_limits = lfs_num;

        return count;
}
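
/*
 * kvf_limits bounds the number of CPT LFs reserved for kernel crypto
 * requests; valid values are 1 to the number of online CPUs.
 */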

static DEVICE_ATTR_RW(kvf_limits);
static DEVICE_ATTR_RW(sso_pf_func_ovrd);

static struct attribute *cptpf_attrs[] = {
        &dev_attr_kvf_limits.attr,
        &dev_attr_sso_pf_func_ovrd.attr,
        NULL
};

static const struct attribute_group cptpf_sysfs_group = {
        .attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
        u64 rev;

        rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                              RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
        rev = (rev >> 12) & 0xFF;
        /*
         * Check if AF has setup revision for RVUM block, otherwise
         * driver probe should be deferred until AF driver comes up
         */
        if (!rev) {
                dev_warn(&cptpf->pdev->dev,
                         "AF is not initialized, deferring probe\n");
                return -EPROBE_DEFER;
        }
        return 0;
}

static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf)
{
        struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
        u64 reg_val = 0x0;

        if (is_dev_otx2(pdev)) {
                eng_grps->rid = pdev->revision;
                return;
        }
        otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
                             BLKADDR_CPT0);
        if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
            is_dev_cn10ka_ax(pdev))
                eng_grps->rid = CPT_UC_RID_CN10K_A;
        else if (cpt_feature_sgv2(pdev))
                eng_grps->rid = CPT_UC_RID_CN10K_B;
}

static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
        u64 cfg;

        cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                              RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
        if (cfg & BIT_ULL(11))
                cptpf->has_cpt1 = true;
}
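
/*
 * CPT_AF_CONSTANTS1 reports how many symmetric (SE), IPsec (IE) and
 * asymmetric (AE) engines this CPT block implements; the counts bound
 * the engine groups that can be created later.
 */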

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
        union otx2_cptx_af_constants1 af_cnsts1 = {0};
        int ret = 0;

        /* Check if 'implemented' bit is set for block BLKADDR_CPT1 */
        cptpf_check_block_implemented(cptpf);

        /* Get number of SE, IE and AE engines */
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                   CPT_AF_CONSTANTS1, &af_cnsts1.u,
                                   BLKADDR_CPT0);
        if (ret)
                return ret;

        cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
        cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
        cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

        /* Disable all cores */
        ret = otx2_cpt_disable_all_cores(cptpf);

        otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
                                &cptpf->afpf_mbox, BLKADDR_CPT0);
        if (cptpf->has_cpt1)
                otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
                                        cptpf->reg_base, &cptpf->afpf_mbox,
                                        BLKADDR_CPT1);
        return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
        int num_vfs = pci_num_vf(pdev);

        if (!num_vfs)
                return 0;

        pci_disable_sriov(pdev);
        cptpf_unregister_vfpf_intr(cptpf, num_vfs);
        cptpf_flr_wq_destroy(cptpf);
        cptpf_vfpf_mbox_destroy(cptpf);
        module_put(THIS_MODULE);
        cptpf->enabled_vfs = 0;

        return 0;
}

static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
        int ret;

        /* Initialize VF<=>PF mailbox */
        ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
        if (ret)
                return ret;

        ret = cptpf_flr_wq_init(cptpf, num_vfs);
        if (ret)
                goto destroy_mbox;
        /* Register VF<=>PF mailbox interrupt */
        ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
        if (ret)
                goto destroy_flr;

        cptpf_get_rid(pdev, cptpf);
        /* Get CPT HW capabilities using LOAD_FVC operation. */
        ret = otx2_cpt_discover_eng_capabilities(cptpf);
        if (ret)
                goto disable_intr;

        ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
        if (ret)
                goto disable_intr;

        cptpf->enabled_vfs = num_vfs;
        ret = pci_enable_sriov(pdev, num_vfs);
        if (ret)
                goto disable_intr;

        dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

        try_module_get(THIS_MODULE);
        return num_vfs;

disable_intr:
        cptpf_unregister_vfpf_intr(cptpf, num_vfs);
        cptpf->enabled_vfs = 0;
destroy_flr:
        cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
        cptpf_vfpf_mbox_destroy(cptpf);
        return ret;
}
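
/*
 * Entry point for the PCI core's sriov_configure hook (reached via the
 * sysfs sriov_numvfs attribute): a positive num_vfs enables that many
 * VFs and returns the count; num_vfs == 0 disables SR-IOV.
 */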

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        if (num_vfs > 0)
                return cptpf_sriov_enable(pdev, num_vfs);

        return cptpf_sriov_disable(pdev);
}
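
/*
 * Probe acquires resources in the same order the error labels unwind
 * them; anything that depends on the AF (the RVUM revision check and
 * the mailbox READY handshake) defers the probe until the AF driver is
 * up.
 */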

static int otx2_cptpf_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct otx2_cptpf_dev *cptpf;
        int err, num_vec;

        cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
        if (!cptpf)
                return -ENOMEM;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                goto clear_drvdata;
        }

        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto clear_drvdata;
        }
        err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME);
        if (err) {
                dev_err(dev, "Couldn't request PCI resources 0x%x\n", err);
                goto clear_drvdata;
        }
        pci_set_master(pdev);
        pci_set_drvdata(pdev, cptpf);
        cptpf->pdev = pdev;

        /* Map PF's configuration registers */
        cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
        if (!cptpf->reg_base) {
                err = -ENOMEM;
                dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err);
                goto clear_drvdata;
        }

        /* Check if AF driver is up, otherwise defer probe */
        err = cpt_is_pf_usable(cptpf);
        if (err)
                goto clear_drvdata;

        num_vec = pci_msix_vec_count(cptpf->pdev);
        if (num_vec <= 0) {
                err = -EINVAL;
                goto clear_drvdata;
        }

        err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for %d msix vectors failed\n",
                        num_vec);
                goto clear_drvdata;
        }
        otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
        /* Initialize AF-PF mailbox */
        err = cptpf_afpf_mbox_init(cptpf);
        if (err)
                goto clear_drvdata;
        /* Register mailbox interrupt */
        err = cptpf_register_afpf_mbox_intr(cptpf);
        if (err)
                goto destroy_afpf_mbox;

        cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
        cptpf->kvf_limits = 1;

        /* Initialize CPT PF device */
        err = cptpf_device_init(cptpf);
        if (err)
                goto unregister_intr;

        err = cn10k_cptpf_lmtst_init(cptpf);
        if (err)
                goto unregister_intr;

        /* Initialize engine groups */
        err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
        if (err)
                goto free_lmtst;

        err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
        if (err)
                goto cleanup_eng_grps;

        err = otx2_cpt_register_dl(cptpf);
        if (err)
                goto sysfs_grp_del;

        return 0;

sysfs_grp_del:
        sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
        otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
free_lmtst:
        cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
unregister_intr:
        cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
        cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

        if (!cptpf)
                return;

        cptpf_sriov_disable(pdev);
        otx2_cpt_unregister_dl(cptpf);

        /* Cleanup inline CPT LFs if attached */
        if (cptpf->lfs.lfs_num)
                otx2_inline_cptlf_cleanup(&cptpf->lfs);

        if (cptpf->cpt1_lfs.lfs_num)
                otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);

        /* Delete sysfs entry created for kernel VF limits */
        sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
        /* Cleanup engine groups */
        otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
        /* Disable AF-PF mailbox interrupt */
        cptpf_disable_afpf_mbox_intr(cptpf);
        /* Destroy AF-PF mbox */
        cptpf_afpf_mbox_destroy(cptpf);
        /* Free LMTST memory */
        cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
        pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
        { 0, } /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
        .name = OTX2_CPT_DRV_NAME,
        .id_table = otx2_cpt_id_table,
        .probe = otx2_cptpf_probe,
        .remove = otx2_cptpf_remove,
        .sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);