GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include <linux/sysfs.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME "rvu_cptpf"
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"

#define CPT_UC_RID_CN9K_B0 1
#define CPT_UC_RID_CN10K_A 4
#define CPT_UC_RID_CN10K_B 5

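/*
 * VF<->PF mailbox interrupts use one bit per VF across two 64-bit
 * registers: register 0 covers VFs 0-63, register 1 covers VFs 64-127.
 */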
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                        int num_vfs)
{
        int ena_bits;

        /* Clear any pending interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

        /* Enable VF interrupts for VFs from 0 to 63 */
        ena_bits = ((num_vfs - 1) % 64);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
                         GENMASK_ULL(ena_bits, 0));

        if (num_vfs > 64) {
                /* Enable VF interrupts for VFs from 64 to 127 */
                ena_bits = num_vfs - 64 - 1;
                otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
                                 GENMASK_ULL(ena_bits, 0));
        }
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
{
        int vector;

        /* Disable VF-PF interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
        /* Clear any pending interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        free_irq(vector, cptpf);

        if (num_vfs > 64) {
                otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
                vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
                free_irq(vector, cptpf);
        }
}

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                                         int num_vfs)
{
        /* Clear FLR interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
                         INTR_MASK(num_vfs));

        /* Enable VF FLR interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
        /* Clear ME interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
                         INTR_MASK(num_vfs));
        /* Enable VF ME interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

        if (num_vfs <= 64)
                return;

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
                         INTR_MASK(num_vfs - 64));
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
                         INTR_MASK(num_vfs - 64));
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
                                          int num_vfs)
{
        int vector;

        /* Disable VF FLR interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
        free_irq(vector, cptpf);

        /* Disable VF ME interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
        free_irq(vector, cptpf);

        if (num_vfs <= 64)
                return;

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
        free_irq(vector, cptpf);

        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                         RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
        vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
        free_irq(vector, cptpf);
}

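/*
 * FLR work handler: notifies the AF of a VF function-level reset via a
 * MBOX_MSG_VF_FLR message and, on success, clears the VF's
 * transaction-pending bit and re-enables its FLR interrupt.
 */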
static void cptpf_flr_wq_handler(struct work_struct *work)
{
        struct cptpf_flr_work *flr_work;
        struct otx2_cptpf_dev *pf;
        struct mbox_msghdr *req;
        struct otx2_mbox *mbox;
        int vf, reg = 0;

        flr_work = container_of(work, struct cptpf_flr_work, work);
        pf = flr_work->pf;
        mbox = &pf->afpf_mbox;

        vf = flr_work - pf->flr_work;

        mutex_lock(&pf->lock);
        req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
                                      sizeof(struct msg_rsp));
        if (!req) {
                mutex_unlock(&pf->lock);
                return;
        }

        req->sig = OTX2_MBOX_REQ_SIG;
        req->id = MBOX_MSG_VF_FLR;
        req->pcifunc &= RVU_PFVF_FUNC_MASK;
        req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

        otx2_cpt_send_mbox_msg(mbox, pf->pdev);
        if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
                if (vf >= 64) {
                        reg = 1;
                        vf = vf - 64;
                }
                /* Clear transaction pending register */
                otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
                otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
                                 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
        }
        mutex_unlock(&pf->lock);
}

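/*
 * FLR interrupt handler: queues FLR work for every signalling VF, then
 * clears and masks the interrupt until the work handler re-enables it.
 */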
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
        int reg, dev, vf, start_vf, num_reg = 1;
        struct otx2_cptpf_dev *cptpf = arg;
        u64 intr;

        if (cptpf->max_vfs > 64)
                num_reg = 2;

        for (reg = 0; reg < num_reg; reg++) {
                intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                       RVU_PF_VFFLR_INTX(reg));
                if (!intr)
                        continue;
                start_vf = 64 * reg;
                for (vf = 0; vf < 64; vf++) {
                        if (!(intr & BIT_ULL(vf)))
                                continue;
                        dev = vf + start_vf;
                        queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
                        /* Clear interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
                        /* Disable the interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFFLR_INT_ENA_W1CX(reg),
                                         BIT_ULL(vf));
                }
        }
        return IRQ_HANDLED;
}

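/*
 * ME (master enable) interrupt handler: clears the transaction-pending
 * bit and the interrupt for every signalling VF.
 */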
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
        struct otx2_cptpf_dev *cptpf = arg;
        int reg, vf, num_reg = 1;
        u64 intr;

        if (cptpf->max_vfs > 64)
                num_reg = 2;

        for (reg = 0; reg < num_reg; reg++) {
                intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                       RVU_PF_VFME_INTX(reg));
                if (!intr)
                        continue;
                for (vf = 0; vf < 64; vf++) {
                        if (!(intr & BIT_ULL(vf)))
                                continue;
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
                        /* Clear interrupt */
                        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
                                         RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
                }
        }
        return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
                                       int num_vfs)
{
        cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
        cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

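/*
 * Request the VF-PF mailbox, FLR and ME interrupt vectors; the second
 * vector of each pair is only needed when more than 64 VFs are enabled.
 */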
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        struct pci_dev *pdev = cptpf->pdev;
        struct device *dev = &pdev->dev;
        int ret, vector;

        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        /* Register VF-PF mailbox interrupt handler */
        ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
                          cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for PFVF mbox0 irq\n");
                return ret;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
        /* Register VF FLR interrupt handler */
        ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for VFFLR0 irq\n");
                goto free_mbox0_irq;
        }
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
        /* Register VF ME interrupt handler */
        ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for VFME0 irq\n");
                goto free_flr0_irq;
        }

        if (num_vfs > 64) {
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
                ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
                                  "CPTVFPF Mbox1", cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for PFVF mbox1 irq\n");
                        goto free_me0_irq;
                }
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
                /* Register VF FLR interrupt handler */
                ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
                                  cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for VFFLR1 irq\n");
                        goto free_mbox1_irq;
                }
                vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
                /* Register VF ME interrupt handler */
                ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
                                  cptpf);
                if (ret) {
                        dev_err(dev,
                                "IRQ registration failed for VFME1 irq\n");
                        goto free_flr1_irq;
                }
        }
        cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
        cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

        return 0;

free_flr1_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
        free_irq(vector, cptpf);
free_mbox1_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
        free_irq(vector, cptpf);
free_me0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
        free_irq(vector, cptpf);
free_flr0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
        free_irq(vector, cptpf);
free_mbox0_irq:
        vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
        free_irq(vector, cptpf);
        return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
        if (!pf->flr_wq)
                return;
        destroy_workqueue(pf->flr_wq);
        pf->flr_wq = NULL;
        kfree(pf->flr_work);
}

static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        int vf;

        cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
        if (!cptpf->flr_wq)
                return -ENOMEM;

        cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
                                  GFP_KERNEL);
        if (!cptpf->flr_work)
                goto destroy_wq;

        for (vf = 0; vf < num_vfs; vf++) {
                cptpf->flr_work[vf].pf = cptpf;
                INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
        }
        return 0;

destroy_wq:
        destroy_workqueue(cptpf->flr_wq);
        return -ENOMEM;
}

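/*
 * The VF-PF mailbox base address is read from hardware: CN10K exposes
 * it via RVU_PF_VF_MBOX_ADDR, earlier silicon via RVU_PF_VF_BAR4_ADDR.
 * One MBOX_SIZE region is mapped per possible VF.
 */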
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
        struct device *dev = &cptpf->pdev->dev;
        u64 vfpf_mbox_base;
        int err, i;

        cptpf->vfpf_mbox_wq =
                alloc_ordered_workqueue("cpt_vfpf_mailbox",
                                        WQ_HIGHPRI | WQ_MEM_RECLAIM);
        if (!cptpf->vfpf_mbox_wq)
                return -ENOMEM;

        /* Map VF-PF mailbox memory */
        if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
                vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
        else
                vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

        if (!vfpf_mbox_base) {
                dev_err(dev, "VF-PF mailbox address not configured\n");
                err = -ENOMEM;
                goto free_wqe;
        }
        cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
                                                MBOX_SIZE * cptpf->max_vfs);
        if (!cptpf->vfpf_mbox_base) {
                dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
                err = -ENOMEM;
                goto free_wqe;
        }
        err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
                             cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
                             num_vfs);
        if (err)
                goto free_wqe;

        for (i = 0; i < num_vfs; i++) {
                cptpf->vf[i].vf_id = i;
                cptpf->vf[i].cptpf = cptpf;
                cptpf->vf[i].intr_idx = i % 64;
                INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
                          otx2_cptpf_vfpf_mbox_handler);
        }
        return 0;

free_wqe:
        destroy_workqueue(cptpf->vfpf_mbox_wq);
        return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
        destroy_workqueue(cptpf->vfpf_mbox_wq);
        otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
        /* Disable AF-PF interrupt */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
                         0x1ULL);
        /* Clear interrupt if any */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

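/*
 * Register the AF-PF mailbox interrupt and send a READY message; if the
 * AF does not respond, the probe is deferred until the AF driver is up.
 */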
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
        struct pci_dev *pdev = cptpf->pdev;
        struct device *dev = &pdev->dev;
        int ret, irq;

        irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
        /* Register AF-PF mailbox interrupt handler */
        ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
                               "CPTAFPF Mbox", cptpf);
        if (ret) {
                dev_err(dev,
                        "IRQ registration failed for PFAF mbox irq\n");
                return ret;
        }
        /* Clear interrupt if any, to avoid spurious interrupts */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
        /* Enable AF-PF interrupt */
        otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
                         0x1ULL);

        ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
        if (ret) {
                dev_warn(dev,
                         "AF not responding to mailbox, deferring probe\n");
                cptpf_disable_afpf_mbox_intr(cptpf);
                return -EPROBE_DEFER;
        }
        return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
        struct pci_dev *pdev = cptpf->pdev;
        resource_size_t offset;
        int err;

        cptpf->afpf_mbox_wq =
                alloc_ordered_workqueue("cpt_afpf_mailbox",
                                        WQ_HIGHPRI | WQ_MEM_RECLAIM);
        if (!cptpf->afpf_mbox_wq)
                return -ENOMEM;

        offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
        /* Map AF-PF mailbox memory */
        cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
        if (!cptpf->afpf_mbox_base) {
                dev_err(&pdev->dev, "Unable to map BAR4\n");
                err = -ENOMEM;
                goto error;
        }

        err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
                             pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
        if (err)
                goto error;

        err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
                             pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
        if (err)
                goto mbox_cleanup;

        INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
        INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
        mutex_init(&cptpf->lock);

        return 0;

mbox_cleanup:
        otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
        destroy_workqueue(cptpf->afpf_mbox_wq);
        return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
        destroy_workqueue(cptpf->afpf_mbox_wq);
        otx2_mbox_destroy(&cptpf->afpf_mbox);
        otx2_mbox_destroy(&cptpf->afpf_mbox_up);
}

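/*
 * sysfs attributes: sso_pf_func_ovrd is writable only on CN9K B0
 * silicon; kvf_limits (kernel VF limits) must lie in
 * [1, num_online_cpus()].
 */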
static ssize_t sso_pf_func_ovrd_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}

static ssize_t sso_pf_func_ovrd_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
        u8 sso_pf_func_ovrd;

        if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
                return count;

        if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
                return -EINVAL;

        cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;

        return count;
}

static ssize_t kvf_limits_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
        int lfs_num;
        int ret;

        ret = kstrtoint(buf, 0, &lfs_num);
        if (ret)
                return ret;
        if (lfs_num < 1 || lfs_num > num_online_cpus()) {
                dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
                        lfs_num, num_online_cpus());
                return -EINVAL;
        }
        cptpf->kvf_limits = lfs_num;

        return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static DEVICE_ATTR_RW(sso_pf_func_ovrd);

static struct attribute *cptpf_attrs[] = {
        &dev_attr_kvf_limits.attr,
        &dev_attr_sso_pf_func_ovrd.attr,
        NULL
};

static const struct attribute_group cptpf_sysfs_group = {
        .attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
        u64 rev;

        rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                              RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
        rev = (rev >> 12) & 0xFF;
        /*
         * Check if AF has setup revision for RVUM block, otherwise
         * driver probe should be deferred until AF driver comes up
         */
        if (!rev) {
                dev_warn(&cptpf->pdev->dev,
                         "AF is not initialized, deferring probe\n");
                return -EPROBE_DEFER;
        }
        return 0;
}

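/*
 * Determine the microcode revision ID: OcteonTX2 parts use the PCI
 * revision directly; CN10K parts are classified via CPT_AF_CTL and the
 * SG version 2 feature.
 */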
static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf)
{
        struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
        u64 reg_val = 0x0;

        if (is_dev_otx2(pdev)) {
                eng_grps->rid = pdev->revision;
                return;
        }
        otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
                             BLKADDR_CPT0);
        if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
            is_dev_cn10ka_ax(pdev))
                eng_grps->rid = CPT_UC_RID_CN10K_A;
        else if (cpt_feature_sgv2(pdev))
                eng_grps->rid = CPT_UC_RID_CN10K_B;
}

static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
        u64 cfg;

        cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
                              RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
        if (cfg & BIT_ULL(11))
                cptpf->has_cpt1 = true;
}

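/*
 * One-time PF hardware setup: detect the optional CPT1 block, read the
 * SE/IE/AE engine counts from CPT_AF_CONSTANTS1, disable all engine
 * cores and record LF device info for each implemented block.
 */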
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
        union otx2_cptx_af_constants1 af_cnsts1 = {0};
        int ret = 0;

        /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
        cptpf_check_block_implemented(cptpf);

        /* Get number of SE, IE and AE engines */
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                                   CPT_AF_CONSTANTS1, &af_cnsts1.u,
                                   BLKADDR_CPT0);
        if (ret)
                return ret;

        cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
        cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
        cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

        /* Disable all cores */
        ret = otx2_cpt_disable_all_cores(cptpf);

        otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
                                &cptpf->afpf_mbox, BLKADDR_CPT0);
        if (cptpf->has_cpt1)
                otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
                                        cptpf->reg_base, &cptpf->afpf_mbox,
                                        BLKADDR_CPT1);
        return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
        int num_vfs = pci_num_vf(pdev);

        if (!num_vfs)
                return 0;

        pci_disable_sriov(pdev);
        cptpf_unregister_vfpf_intr(cptpf, num_vfs);
        cptpf_flr_wq_destroy(cptpf);
        cptpf_vfpf_mbox_destroy(cptpf);
        module_put(THIS_MODULE);
        cptpf->enabled_vfs = 0;

        return 0;
}

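/*
 * SR-IOV enable path: bring up the VF-PF mailbox, FLR workqueue and
 * interrupts, then discover engine capabilities and create engine
 * groups before turning on the VFs.
 */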
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
        int ret;

        /* Initialize VF<=>PF mailbox */
        ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
        if (ret)
                return ret;

        ret = cptpf_flr_wq_init(cptpf, num_vfs);
        if (ret)
                goto destroy_mbox;
        /* Register VF<=>PF mailbox interrupt */
        ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
        if (ret)
                goto destroy_flr;

        cptpf_get_rid(pdev, cptpf);
        /* Get CPT HW capabilities using LOAD_FVC operation. */
        ret = otx2_cpt_discover_eng_capabilities(cptpf);
        if (ret)
                goto disable_intr;

        ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
        if (ret)
                goto disable_intr;

        cptpf->enabled_vfs = num_vfs;
        ret = pci_enable_sriov(pdev, num_vfs);
        if (ret)
                goto disable_intr;

        dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

        try_module_get(THIS_MODULE);
        return num_vfs;

disable_intr:
        cptpf_unregister_vfpf_intr(cptpf, num_vfs);
        cptpf->enabled_vfs = 0;
destroy_flr:
        cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
        cptpf_vfpf_mbox_destroy(cptpf);
        return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        if (num_vfs > 0)
                return cptpf_sriov_enable(pdev, num_vfs);

        return cptpf_sriov_disable(pdev);
}

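/*
 * PCI probe: enable the device, map the PF register BAR, allocate all
 * MSI-X vectors, initialize the AF-PF mailbox and CPT hardware, then
 * register sysfs and devlink interfaces.
 */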
static int otx2_cptpf_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct otx2_cptpf_dev *cptpf;
        int err, num_vec;

        cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
        if (!cptpf)
                return -ENOMEM;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                goto clear_drvdata;
        }

        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto clear_drvdata;
        }
        err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME);
        if (err) {
                dev_err(dev, "Couldn't request PCI resources 0x%x\n", err);
                goto clear_drvdata;
        }
        pci_set_master(pdev);
        pci_set_drvdata(pdev, cptpf);
        cptpf->pdev = pdev;

        /* Map PF's configuration registers */
        cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
        if (!cptpf->reg_base) {
                err = -ENOMEM;
                dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err);
                goto clear_drvdata;
        }

        /* Check if AF driver is up, otherwise defer probe */
        err = cpt_is_pf_usable(cptpf);
        if (err)
                goto clear_drvdata;

        num_vec = pci_msix_vec_count(cptpf->pdev);
        if (num_vec <= 0) {
                err = -EINVAL;
                goto clear_drvdata;
        }

        err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for %d msix vectors failed\n",
                        num_vec);
                goto clear_drvdata;
        }
        otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
        /* Initialize AF-PF mailbox */
        err = cptpf_afpf_mbox_init(cptpf);
        if (err)
                goto clear_drvdata;
        /* Register mailbox interrupt */
        err = cptpf_register_afpf_mbox_intr(cptpf);
        if (err)
                goto destroy_afpf_mbox;

        cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
        cptpf->kvf_limits = 1;

        /* Initialize CPT PF device */
        err = cptpf_device_init(cptpf);
        if (err)
                goto unregister_intr;

        err = cn10k_cptpf_lmtst_init(cptpf);
        if (err)
                goto unregister_intr;

        /* Initialize engine groups */
        err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
        if (err)
                goto free_lmtst;

        err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
        if (err)
                goto cleanup_eng_grps;

        err = otx2_cpt_register_dl(cptpf);
        if (err)
                goto sysfs_grp_del;

        return 0;

sysfs_grp_del:
        sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
        otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
free_lmtst:
        cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
unregister_intr:
        cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
        cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
        struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

        if (!cptpf)
                return;

        cptpf_sriov_disable(pdev);
        otx2_cpt_unregister_dl(cptpf);

        /* Cleanup Inline CPT LFs if attached */
        if (cptpf->lfs.lfs_num)
                otx2_inline_cptlf_cleanup(&cptpf->lfs);

        if (cptpf->cpt1_lfs.lfs_num)
                otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);

        /* Delete sysfs entry created for kernel VF limits */
        sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
        /* Cleanup engine groups */
        otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
        /* Disable AF-PF mailbox interrupt */
        cptpf_disable_afpf_mbox_intr(cptpf);
        /* Destroy AF-PF mbox */
        cptpf_afpf_mbox_destroy(cptpf);
        /* Free LMTST memory */
        cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
        pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
        { 0, } /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
        .name = OTX2_CPT_DRV_NAME,
        .id_table = otx2_cpt_id_table,
        .probe = otx2_cptpf_probe,
        .remove = otx2_cptpf_remove,
        .sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);