GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"

/* Fastpath ipsec opcode with inplace processing */
#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))

#define cpt_inline_rx_opcode(pdev)			\
({							\
	u8 opcode;					\
	if (is_dev_otx2(pdev))				\
		opcode = CPT_INLINE_RX_OPCODE;		\
	else						\
		opcode = CN10K_CPT_INLINE_RX_OPCODE;	\
	(opcode);					\
})

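/*
 * cpt_inline_rx_opcode() is a GNU statement expression: it evaluates to its
 * last expression, (opcode), so it can be used as an rvalue, as in
 * rx_inline_ipsec_lf_cfg() below:
 *
 *	nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
 */
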
/*
 * CPT PF driver version. It is incremented by 1 for every feature
 * addition to the CPT mailbox messages.
 */
#define OTX2_CPT_PF_DRV_VERSION 0x1

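/* Forward a VF request unchanged to the AF and wait for it to be processed. */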
static int forward_to_af(struct otx2_cptpf_dev *cptpf,
			 struct otx2_cptvf_info *vf,
			 struct mbox_msghdr *req, int size)
{
	struct mbox_msghdr *msg;
	int ret;

	mutex_lock(&cptpf->lock);
	msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
	if (msg == NULL) {
		mutex_unlock(&cptpf->lock);
		return -ENOMEM;
	}

	memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
	       (uint8_t *)req + sizeof(struct mbox_msghdr), size);
	msg->id = req->id;
	msg->pcifunc = req->pcifunc;
	msg->sig = req->sig;
	msg->ver = req->ver;

	ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
	/* Error code -EIO indicates a communication failure with the AF.
	 * All other error codes mean that the AF processed the VF messages
	 * and set the error codes in the response messages (if any), so
	 * simply forward the responses to the VF.
	 */
	if (ret == -EIO) {
		dev_warn(&cptpf->pdev->dev,
			 "AF not responding to VF%d messages\n", vf->vf_id);
		mutex_unlock(&cptpf->lock);
		return ret;
	}
	mutex_unlock(&cptpf->lock);
	return 0;
}

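/*
 * Reply to MBOX_MSG_GET_CAPS with the PF driver version, CPT revision and
 * engine capabilities.
 */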
static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req)
{
	struct otx2_cpt_caps_rsp *rsp;

	rsp = (struct otx2_cpt_caps_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_CAPS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
	rsp->cpt_revision = cptpf->eng_grps.rid;
	memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));

	return 0;
}

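/* Reply with the engine group number serving the requested engine type. */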
static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
				      struct otx2_cptvf_info *vf,
				      struct mbox_msghdr *req)
{
	struct otx2_cpt_egrp_num_msg *grp_req;
	struct otx2_cpt_egrp_num_rsp *rsp;

	grp_req = (struct otx2_cpt_egrp_num_msg *)req;
	rsp = (struct otx2_cpt_egrp_num_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->eng_type = grp_req->eng_type;
	rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
						grp_req->eng_type);

	return 0;
}

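/* Reply with the kernel VF limits (kvf_limits) configured on this PF. */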
static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
				 struct otx2_cptvf_info *vf,
				 struct mbox_msghdr *req)
{
	struct otx2_cpt_kvf_limits_rsp *rsp;

	rsp = (struct otx2_cpt_kvf_limits_rsp *)
	      otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;

	rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = req->pcifunc;
	rsp->kvf_limits = cptpf->kvf_limits;

	return 0;
}

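/*
 * Ask the AF to enable CPT inline IPsec inbound processing on the given
 * slot for the provided SSO PF/function.
 */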
static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
					 int sso_pf_func, u8 slot)
{
	struct cpt_inline_ipsec_cfg_msg *req;
	struct pci_dev *pdev = cptpf->pdev;

	req = (struct cpt_inline_ipsec_cfg_msg *)
	      otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
				      sizeof(*req), sizeof(struct msg_rsp));
	if (req == NULL) {
		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
		return -EFAULT;
	}
	memset(req, 0, sizeof(*req));
	req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
	req->dir = CPT_INLINE_INBOUND;
	req->slot = slot;
	req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
	req->sso_pf_func = sso_pf_func;
	req->enable = 1;

	return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
}

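/*
 * Program the NIX side of inline IPsec RX through the AF: credits, engine
 * group, opcode and instruction queue selection, then enable the inbound
 * path on CPT0 (and on CPT1 when present).
 */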
static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
				  struct otx2_cpt_rx_inline_lf_cfg *req)
{
	struct nix_inline_ipsec_cfg *nix_req;
	struct pci_dev *pdev = cptpf->pdev;
	int ret;

	nix_req = (struct nix_inline_ipsec_cfg *)
		  otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
					  sizeof(*nix_req),
					  sizeof(struct msg_rsp));
	if (nix_req == NULL) {
		dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
		return -EFAULT;
	}
	memset(nix_req, 0, sizeof(*nix_req));
	nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
	nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
	nix_req->enable = 1;
	nix_req->credit_th = req->credit_th;
	nix_req->bpid = req->bpid;
	if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
		nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
	else
		nix_req->cpt_credit = req->credit - 1;
	nix_req->gen_cfg.egrp = egrp;
	if (req->opcode)
		nix_req->gen_cfg.opcode = req->opcode;
	else
		nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
	nix_req->gen_cfg.param1 = req->param1;
	nix_req->gen_cfg.param2 = req->param2;
	nix_req->inst_qsel.cpt_pf_func =
		OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
	nix_req->inst_qsel.cpt_slot = 0;
	ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
	if (ret)
		return ret;

	if (cptpf->has_cpt1) {
		ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
		if (ret)
			return ret;
	}

	return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
}

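/*
 * Initialize an inline-IPsec CPT LF on the given engine group at high queue
 * priority and wire up its MSI-X misc interrupts.
 */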
int otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
			    struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs)
{
	int ret;

	ret = otx2_cptlf_init(lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret) {
		dev_err(&cptpf->pdev->dev,
			"LF configuration failed for RX inline ipsec.\n");
		return ret;
	}

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register for CPT LF Misc interrupts */
	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		goto free_irq;

	return 0;
free_irq:
	otx2_cptlf_unregister_misc_interrupts(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);
	return ret;
}

void otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Unregister misc interrupt */
	otx2_cptlf_unregister_misc_interrupts(lfs);

	/* Cleanup LFs */
	otx2_cptlf_shutdown(lfs);
}

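/*
 * Handle MBOX_MSG_RX_INLINE_IPSEC_LF_CFG from a VF: set up inline-IPsec LFs
 * on CPT0 (and CPT1 when present) and push the RX configuration to the AF.
 */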
static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
					     struct mbox_msghdr *req)
{
	struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
	int num_lfs = 1, ret;
	u8 egrp;

	cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
	if (cptpf->lfs.lfs_num) {
		dev_err(&cptpf->pdev->dev,
			"LF is already configured for RX inline ipsec.\n");
		return -EEXIST;
	}
	/*
	 * Allow the LFs to execute requests destined only to the IE_TYPES
	 * group and set the queue priority of each LF to high.
	 */
	egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
	if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(&cptpf->pdev->dev,
			"Engine group for inline ipsec is not available\n");
		return -ENOENT;
	}

	cptpf->lfs.global_slot = 0;
	cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
	cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;

	ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs);
	if (ret) {
		dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n");
		return ret;
	}

	if (cptpf->has_cpt1) {
		cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
		cptpf->cpt1_lfs.global_slot = num_lfs;
		cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
		cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
		ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp,
					      num_lfs);
		if (ret) {
			dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n");
			goto lf_cleanup;
		}
		cptpf->rsrc_req_blkaddr = 0;
	}

	ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
	if (ret)
		goto lf1_cleanup;

	return 0;

lf1_cleanup:
	otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
lf_cleanup:
	otx2_inline_cptlf_cleanup(&cptpf->lfs);
	return ret;
}

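/*
 * Dispatch a VF mailbox request: serve it locally when the PF can handle it
 * itself, otherwise forward it to the AF.
 */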
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
			       struct otx2_cptvf_info *vf,
			       struct mbox_msghdr *req, int size)
{
	int err = 0;

	/* Check if the msg is valid; if not, reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto inval_msg;

	switch (req->id) {
	case MBOX_MSG_GET_ENG_GRP_NUM:
		err = handle_msg_get_eng_grp_num(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_CAPS:
		err = handle_msg_get_caps(cptpf, vf, req);
		break;
	case MBOX_MSG_GET_KVF_LIMITS:
		err = handle_msg_kvf_limits(cptpf, vf, req);
		break;
	case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
		err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
		break;

	default:
		err = forward_to_af(cptpf, vf, req, size);
		break;
	}
	return err;

inval_msg:
	otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
	otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
	return err;
}

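/*
 * VF-PF mailbox interrupt handler. Each RVU_PF_VFPF_MBOX_INTX(i) register
 * carries the pending bits for a block of 64 VFs, hence the outer loop over
 * the two registers.
 */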
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_cptvf_info *vf;
	int i, vf_idx;
	u64 intr;

	/*
	 * Check which VF has raised an interrupt and schedule the
	 * corresponding work queue to process the messages.
	 */
	for (i = 0; i < 2; i++) {
		/* Read the interrupt bits */
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFPF_MBOX_INTX(i));

		for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
			vf = &cptpf->vf[vf_idx];
			if (intr & (1ULL << vf->intr_idx)) {
				queue_work(cptpf->vfpf_mbox_wq,
					   &vf->vfpf_mbox_work);
				/* Clear the interrupt */
				otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
						 0, RVU_PF_VFPF_MBOX_INTX(i),
						 BIT_ULL(vf->intr_idx));
			}
		}
	}
	return IRQ_HANDLED;
}

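/*
 * Work queue handler: process all pending mailbox messages from one VF and
 * send the accumulated responses back to it.
 */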
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_cptvf_info *vf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i, err;

	vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
	cptpf = vf->cptpf;
	mbox = &cptpf->vfpf_mbox;
	/* sync with mbox memory region */
	smp_rmb();
	mdev = &mbox->dev[vf->vf_id];
	/* Process received mbox messages */
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		/* Set which VF sent this message based on mbox IRQ */
		msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
						(vf->vf_id + 1));
		err = cptpf_handle_vf_req(cptpf, vf, msg,
					  msg->next_msgoff - offset);
		/*
		 * Behave like the AF and drop the msg if there is no memory;
		 * timeout handling also goes here.
		 */
		if (err == -ENOMEM || err == -EIO)
			break;
		offset = msg->next_msgoff;
		/* Write barrier required for VF responses which are handled by
		 * PF driver and not forwarded to AF.
		 */
		smp_wmb();
	}
	/* Send mbox responses to VF */
	if (mdev->num_msgs)
		otx2_mbox_msg_send(mbox, vf->vf_id);
}

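/*
 * AF-PF mailbox interrupt handler: schedule work for whichever of the two
 * AF mailboxes (responses and up messages) has pending messages, then ack
 * the interrupt.
 */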
irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 intr;

	/* Read the interrupt bits */
	intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);

	if (intr & 0x1ULL) {
		mbox = &cptpf->afpf_mbox;
		mdev = &mbox->dev[0];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs)
			/* Schedule work queue function to process the MBOX request */
			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);

		mbox = &cptpf->afpf_mbox_up;
		mdev = &mbox->dev[0];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs)
			/* Schedule work queue function to process the MBOX request */
			queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
		/* Clear and ack the interrupt */
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
				 0x1ULL);
	}
	return IRQ_HANDLED;
}

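/*
 * Consume an AF response addressed to the PF itself: validate the header,
 * then update the driver state according to the message it answers.
 */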
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
				  struct mbox_msghdr *msg)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct device *dev = &cptpf->pdev->dev;
	struct cpt_rd_wr_reg_msg *rsp_rd_wr;
	struct msix_offset_rsp *rsp_msix;
	int i;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
		lfs = &cptpf->cpt1_lfs;

	switch (msg->id) {
	case MBOX_MSG_READY:
		cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
		break;
	case MBOX_MSG_MSIX_OFFSET:
		rsp_msix = (struct msix_offset_rsp *)msg;
		for (i = 0; i < rsp_msix->cptlfs; i++)
			lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];

		for (i = 0; i < rsp_msix->cpt1_lfs; i++)
			lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i];
		break;
	case MBOX_MSG_CPT_RD_WR_REGISTER:
		rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
		if (msg->rc) {
			dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
				rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
				msg->rc);
			return;
		}
		if (!rsp_rd_wr->is_write)
			*rsp_rd_wr->ret_val = rsp_rd_wr->val;
		break;
	case MBOX_MSG_ATTACH_RESOURCES:
		if (!msg->rc)
			lfs->are_lfs_attached = 1;
		break;
	case MBOX_MSG_DETACH_RESOURCES:
		if (!msg->rc)
			lfs->are_lfs_attached = 0;
		break;
	case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
	case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
	case MBOX_MSG_CPT_LF_RESET:
	case MBOX_MSG_LMTST_TBL_SETUP:
		break;

	default:
		dev_err(dev, "Unsupported msg %d received.\n", msg->id);
		break;
	}
}

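/* Forward an AF response to the VF that originated the request. */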
static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
			  int vf_id, int size)
{
	struct otx2_mbox *vfpf_mbox;
	struct mbox_msghdr *fwd;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}
	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}
	vfpf_mbox = &cptpf->vfpf_mbox;
	vf_id--;
	if (vf_id >= cptpf->enabled_vfs) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg to unknown VF: %d >= %d\n",
			vf_id, cptpf->enabled_vfs);
		return;
	}
	if (msg->id == MBOX_MSG_VF_FLR)
		return;

	fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
	if (!fwd) {
		dev_err(&cptpf->pdev->dev,
			"Forwarding to VF%d failed.\n", vf_id);
		return;
	}
	memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
	       (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
	fwd->id = msg->id;
	fwd->pcifunc = msg->pcifunc;
	fwd->sig = msg->sig;
	fwd->ver = msg->ver;
	fwd->rc = msg->rc;
}

/* Handle mailbox messages received from AF */
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox *afpf_mbox;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	int offset, vf_id, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
	afpf_mbox = &cptpf->afpf_mbox;
	mdev = &afpf_mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
	offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
					     offset);
		vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
			RVU_PFVF_FUNC_MASK;
		if (vf_id > 0)
			forward_to_vf(cptpf, msg, vf_id,
				      msg->next_msgoff - offset);
		else
			process_afpf_mbox_msg(cptpf, msg);

		offset = msg->next_msgoff;
		/* Sync VF response ready to be sent */
		smp_wmb();
		mdev->msgs_acked++;
	}
	otx2_mbox_reset(afpf_mbox, 0);
}

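/*
 * Handle MBOX_MSG_CPT_INST_LMTST from the AF: submit the carried CPT
 * instruction on LF 0 (if any LFs are configured) and acknowledge with an
 * empty response.
 */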
static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
				      struct mbox_msghdr *msg)
{
	struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct msg_rsp *rsp;

	if (cptpf->lfs.lfs_num)
		lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
				   &lfs->lf[0]);

	rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
						    sizeof(*rsp));
	if (!rsp)
		return;

	rsp->hdr.id = msg->id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.pcifunc = 0;
	rsp->hdr.rc = 0;
}

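/* Dispatch an AF-initiated (up) message to its handler. */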
static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
				     struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(&cptpf->pdev->dev,
			"MBOX msg with unknown ID %d\n", msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_CPT_INST_LMTST:
		handle_msg_cpt_inst_lmtst(cptpf, msg);
		break;
	default:
		otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
	}
}

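/*
 * Work queue handler for AF-initiated messages: process each pending up
 * message and send the responses back to the AF.
 */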
void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_cptpf_dev *cptpf;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	int offset, i;

	cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
	mbox = &cptpf->afpf_mbox_up;
	mdev = &mbox->dev[0];
	/* Sync mbox data into memory */
	smp_wmb();

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < rsp_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		process_afpf_mbox_up_msg(cptpf, msg);

		offset = mbox->rx_start + msg->next_msgoff;
	}
	otx2_mbox_msg_send(mbox, 0);
}