freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/bnxt/bnxt_re/qplib_fp.c
1
/*
2
* Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
3
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
4
*
5
* Redistribution and use in source and binary forms, with or without
6
* modification, are permitted provided that the following conditions
7
* are met:
8
*
9
* 1. Redistributions of source code must retain the above copyright
10
* notice, this list of conditions and the following disclaimer.
11
* 2. Redistributions in binary form must reproduce the above copyright
12
* notice, this list of conditions and the following disclaimer in
13
* the documentation and/or other materials provided with the
14
* distribution.
15
*
16
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
17
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
*
28
* Description: Fast Path Operators
29
*/
30
31
#include <linux/interrupt.h>
32
#include <linux/spinlock.h>
33
#include <linux/sched.h>
34
#include <linux/slab.h>
35
#include <linux/pci.h>
36
#include <linux/delay.h>
37
#include <linux/if_ether.h>
38
#include <linux/hardirq.h>
39
#include <rdma/ib_mad.h>
40
41
#include "hsi_struct_def.h"
42
#include "qplib_tlv.h"
43
#include "qplib_res.h"
44
#include "qplib_rcfw.h"
45
#include "qplib_sp.h"
46
#include "qplib_fp.h"
47
#include "ib_verbs.h"
48
49
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
50
51
static void bnxt_re_legacy_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
52
{
53
qp->sq.condition = false;
54
qp->sq.legacy_send_phantom = false;
55
qp->sq.single = false;
56
}
57
58
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
59
{
60
struct bnxt_qplib_cq *scq, *rcq;
61
62
scq = qp->scq;
63
rcq = qp->rcq;
64
65
if (!qp->sq.flushed) {
66
dev_dbg(&scq->hwq.pdev->dev,
67
"QPLIB: FP: Adding to SQ Flush list = %p\n",
68
qp);
69
bnxt_re_legacy_cancel_phantom_processing(qp);
70
list_add_tail(&qp->sq_flush, &scq->sqf_head);
71
qp->sq.flushed = true;
72
}
73
if (!qp->srq) {
74
if (!qp->rq.flushed) {
75
dev_dbg(&rcq->hwq.pdev->dev,
76
"QPLIB: FP: Adding to RQ Flush list = %p\n",
77
qp);
78
list_add_tail(&qp->rq_flush, &rcq->rqf_head);
79
qp->rq.flushed = true;
80
}
81
}
82
}
83
84
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp)
85
__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
86
{
87
/* Interrupts are already disabled in calling functions */
88
spin_lock(&qp->scq->flush_lock);
89
if (qp->scq == qp->rcq)
90
__acquire(&qp->rcq->flush_lock);
91
else
92
spin_lock(&qp->rcq->flush_lock);
93
}
94
95
static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp)
96
__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
97
{
98
if (qp->scq == qp->rcq)
99
__release(&qp->rcq->flush_lock);
100
else
101
spin_unlock(&qp->rcq->flush_lock);
102
spin_unlock(&qp->scq->flush_lock);
103
}
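/*
* Note on the flush-lock helpers above: a QP's send and receive CQs may
* be the same object, so the release path mirrors the acquire path
* exactly. The __acquire()/__release() calls have no runtime effect;
* they only keep static lock-balance annotations consistent for the
* scq == rcq case.
*/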
104
105
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
106
{
107
108
bnxt_qplib_acquire_cq_flush_locks(qp);
109
__bnxt_qplib_add_flush_qp(qp);
110
bnxt_qplib_release_cq_flush_locks(qp);
111
}
112
113
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
114
{
115
if (qp->sq.flushed) {
116
qp->sq.flushed = false;
117
list_del(&qp->sq_flush);
118
}
119
if (!qp->srq) {
120
if (qp->rq.flushed) {
121
qp->rq.flushed = false;
122
list_del(&qp->rq_flush);
123
}
124
}
125
}
126
127
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
128
{
129
130
bnxt_qplib_acquire_cq_flush_locks(qp);
131
__clean_cq(qp->scq, (u64)(unsigned long)qp);
132
qp->sq.hwq.prod = 0;
133
qp->sq.hwq.cons = 0;
134
qp->sq.swq_start = 0;
135
qp->sq.swq_last = 0;
136
__clean_cq(qp->rcq, (u64)(unsigned long)qp);
137
qp->rq.hwq.prod = 0;
138
qp->rq.hwq.cons = 0;
139
qp->rq.swq_start = 0;
140
qp->rq.swq_last = 0;
141
142
__bnxt_qplib_del_flush_qp(qp);
143
bnxt_qplib_release_cq_flush_locks(qp);
144
}
145
146
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
147
{
148
struct bnxt_qplib_nq_work *nq_work =
149
container_of(work, struct bnxt_qplib_nq_work, work);
150
151
struct bnxt_qplib_cq *cq = nq_work->cq;
152
struct bnxt_qplib_nq *nq = nq_work->nq;
153
154
if (cq && nq) {
155
spin_lock_bh(&cq->compl_lock);
156
if (nq->cqn_handler) {
157
dev_dbg(&nq->res->pdev->dev,
158
"%s:Trigger cq = %p event nq = %p\n",
159
__func__, cq, nq);
160
nq->cqn_handler(nq, cq);
161
}
162
spin_unlock_bh(&cq->compl_lock);
163
}
164
kfree(nq_work);
165
}
166
167
static void bnxt_qplib_put_hdr_buf(struct pci_dev *pdev,
168
struct bnxt_qplib_hdrbuf *buf)
169
{
170
dma_free_coherent(&pdev->dev, buf->len, buf->va, buf->dma_map);
171
kfree(buf);
172
}
173
174
static void *bnxt_qplib_get_hdr_buf(struct pci_dev *pdev, u32 step, u32 cnt)
175
{
176
struct bnxt_qplib_hdrbuf *hdrbuf;
177
u32 len;
178
179
hdrbuf = kmalloc(sizeof(*hdrbuf), GFP_KERNEL);
180
if (!hdrbuf)
181
return NULL;
182
183
len = ALIGN((step * cnt), PAGE_SIZE);
184
hdrbuf->va = dma_alloc_coherent(&pdev->dev, len,
185
&hdrbuf->dma_map, GFP_KERNEL);
186
if (!hdrbuf->va)
187
goto out;
188
189
hdrbuf->len = len;
190
hdrbuf->step = step;
191
return hdrbuf;
192
out:
193
kfree(hdrbuf);
194
return NULL;
195
}
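/*
* Illustrative sizing for bnxt_qplib_get_hdr_buf() above (numbers assumed,
* not taken from hardware): with a header step of 256 bytes and 64 WQEs,
* len = ALIGN(256 * 64, PAGE_SIZE) = 16384 bytes with 4 KiB pages. The
* buffer is a single DMA-coherent region and the header slot for WQE i is
* addressed as va + i * step.
*/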
196
197
void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res,
198
struct bnxt_qplib_qp *qp)
199
{
200
if (qp->rq_hdr_buf) {
201
bnxt_qplib_put_hdr_buf(res->pdev, qp->rq_hdr_buf);
202
qp->rq_hdr_buf = NULL;
203
}
204
205
if (qp->sq_hdr_buf) {
206
bnxt_qplib_put_hdr_buf(res->pdev, qp->sq_hdr_buf);
207
qp->sq_hdr_buf = NULL;
208
}
209
}
210
211
int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res,
212
struct bnxt_qplib_qp *qp, u32 sstep, u32 rstep)
213
{
214
struct pci_dev *pdev;
215
int rc = 0;
216
217
pdev = res->pdev;
218
if (sstep) {
219
qp->sq_hdr_buf = bnxt_qplib_get_hdr_buf(pdev, sstep,
220
qp->sq.max_wqe);
221
if (!qp->sq_hdr_buf) {
222
dev_err(&pdev->dev, "QPLIB: Failed to get sq_hdr_buf\n");
223
return -ENOMEM;
224
}
225
}
226
227
if (rstep) {
228
qp->rq_hdr_buf = bnxt_qplib_get_hdr_buf(pdev, rstep,
229
qp->rq.max_wqe);
230
if (!qp->rq_hdr_buf) {
231
rc = -ENOMEM;
232
dev_err(&pdev->dev, "QPLIB: Failed to get rq_hdr_buf\n");
233
goto fail;
234
}
235
}
236
237
return 0;
238
fail:
239
bnxt_qplib_free_hdr_buf(res, qp);
240
return rc;
241
}
242
243
/*
244
 * clean_nq - Invalidate CQEs from the given NQ.
 * @cq - Completion queue
 *
 * Traverse the whole notification queue and invalidate any completion
 * associated with the CQ handle provided by the caller.
 * Note - This function traverses the hardware queue but does not update
 * the consumer index. CQEs invalidated (marked) by this function are
 * ignored during actual completion processing of the notification queue.
252
*/
253
static void clean_nq(struct bnxt_qplib_cq *cq)
254
{
255
struct bnxt_qplib_hwq *nq_hwq = NULL;
256
struct bnxt_qplib_nq *nq = NULL;
257
struct nq_base *hw_nqe = NULL;
258
struct nq_cn *nqcne = NULL;
259
u32 peek_flags, peek_cons;
260
u64 q_handle;
261
u32 type;
262
int i;
263
264
nq = cq->nq;
265
nq_hwq = &nq->hwq;
266
267
spin_lock_bh(&nq_hwq->lock);
268
peek_flags = nq->nq_db.dbinfo.flags;
269
peek_cons = nq_hwq->cons;
270
for (i = 0; i < nq_hwq->max_elements; i++) {
271
hw_nqe = bnxt_qplib_get_qe(nq_hwq, peek_cons, NULL);
272
if (!NQE_CMP_VALID(hw_nqe, peek_flags))
273
break;
274
275
/* The valid test of the entry must be done first
276
* before reading any further.
277
*/
278
dma_rmb();
279
type = le16_to_cpu(hw_nqe->info10_type) &
280
NQ_BASE_TYPE_MASK;
281
282
/* Processing only NQ_BASE_TYPE_CQ_NOTIFICATION */
283
if (type == NQ_BASE_TYPE_CQ_NOTIFICATION) {
284
nqcne = (struct nq_cn *)hw_nqe;
285
286
q_handle = le32_to_cpu(nqcne->cq_handle_low);
287
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32;
288
if (q_handle == (u64)cq) {
289
nqcne->cq_handle_low = 0;
290
nqcne->cq_handle_high = 0;
291
cq->cnq_events++;
292
}
293
}
294
bnxt_qplib_hwq_incr_cons(nq_hwq->max_elements, &peek_cons,
295
1, &peek_flags);
296
}
297
spin_unlock_bh(&nq_hwq->lock);
298
}
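/*
* The invalidation above works by zeroing the cq_handle words of matching
* CQ-notification NQEs in place; bnxt_qplib_service_nq() then reads a NULL
* handle for those entries and skips delivering them, while the NQ
* consumer index itself is left untouched.
*/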
299
300
/*
301
 * Wait until all NQEs for this CQ have been received.
 * clean_nq is tried 100 times; each time clean_cq
 * loops up to budget times. budget is based on the
 * number of CQs shared by that NQ, so any NQE from
 * this CQ would already be in the NQ.
306
*/
307
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
308
{
309
u32 retry_cnt = 100;
310
u16 total_events;
311
312
if (!cnq_events) {
313
clean_nq(cq);
314
return;
315
}
316
while (retry_cnt--) {
317
total_events = cq->cnq_events;
318
319
/* Increment total_events by 1 if any CREQ event received with CQ notification */
320
if (cq->is_cq_err_event)
321
total_events++;
322
323
if (cnq_events == total_events) {
324
dev_dbg(&cq->nq->res->pdev->dev,
325
"QPLIB: NQ cleanup - Received all NQ events\n");
326
return;
327
}
328
msleep(1);
329
clean_nq(cq);
330
}
331
}
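/*
* Accounting sketch for the wait loop above: the caller passes the number
* of notification events it expects (cnq_events); each iteration compares
* that against cq->cnq_events accumulated by the NQ handler, plus one if a
* CQ error CREQ event was seen, and otherwise re-runs clean_nq after a
* 1 ms sleep, giving up after 100 attempts.
*/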
332
333
static void bnxt_qplib_service_nq(unsigned long data)
334
{
335
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
336
struct bnxt_qplib_hwq *nq_hwq = &nq->hwq;
337
int budget = nq->budget;
338
struct bnxt_qplib_res *res;
339
struct bnxt_qplib_cq *cq;
340
struct pci_dev *pdev;
341
struct nq_base *nqe;
342
u32 hw_polled = 0;
343
u64 q_handle;
344
u32 type;
345
346
res = nq->res;
347
pdev = res->pdev;
348
349
spin_lock_bh(&nq_hwq->lock);
350
/* Service the NQ until empty or budget expired */
351
while (budget--) {
352
nqe = bnxt_qplib_get_qe(nq_hwq, nq_hwq->cons, NULL);
353
if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
354
break;
355
/* The valid test of the entry must be done first before
356
* reading any further.
357
*/
358
dma_rmb();
359
type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
360
switch (type) {
361
case NQ_BASE_TYPE_CQ_NOTIFICATION:
362
{
363
struct nq_cn *nqcne = (struct nq_cn *)nqe;
364
365
q_handle = le32_to_cpu(nqcne->cq_handle_low);
366
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32;
367
cq = (struct bnxt_qplib_cq *)q_handle;
368
if (!cq)
369
break;
370
cq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
371
cq->dbinfo.toggle = cq->toggle;
372
bnxt_qplib_armen_db(&cq->dbinfo,
373
DBC_DBC_TYPE_CQ_ARMENA);
374
spin_lock_bh(&cq->compl_lock);
375
atomic_set(&cq->arm_state, 0);
376
if (!nq->cqn_handler(nq, (cq)))
377
nq->stats.num_cqne_processed++;
378
else
379
dev_warn(&pdev->dev,
380
"QPLIB: cqn - type 0x%x not handled\n",
381
type);
382
cq->cnq_events++;
383
spin_unlock_bh(&cq->compl_lock);
384
break;
385
}
386
case NQ_BASE_TYPE_SRQ_EVENT:
387
{
388
struct bnxt_qplib_srq *srq;
389
struct nq_srq_event *nqsrqe =
390
(struct nq_srq_event *)nqe;
391
u8 toggle;
392
393
q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
394
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) << 32;
395
srq = (struct bnxt_qplib_srq *)q_handle;
396
toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
397
>> NQ_CN_TOGGLE_SFT;
398
srq->dbinfo.toggle = toggle;
399
bnxt_qplib_armen_db(&srq->dbinfo,
400
DBC_DBC_TYPE_SRQ_ARMENA);
401
if (!nq->srqn_handler(nq,
402
(struct bnxt_qplib_srq *)q_handle,
403
nqsrqe->event))
404
nq->stats.num_srqne_processed++;
405
else
406
dev_warn(&pdev->dev,
407
"QPLIB: SRQ event 0x%x not handled\n",
408
nqsrqe->event);
409
break;
410
}
411
default:
412
dev_warn(&pdev->dev,
413
"QPLIB: nqe with opcode = 0x%x not handled\n",
414
type);
415
break;
416
}
417
hw_polled++;
418
bnxt_qplib_hwq_incr_cons(nq_hwq->max_elements, &nq_hwq->cons,
419
1, &nq->nq_db.dbinfo.flags);
420
}
421
nqe = bnxt_qplib_get_qe(nq_hwq, nq_hwq->cons, NULL);
422
if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags)) {
423
nq->stats.num_nq_rearm++;
424
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
425
} else if (nq->requested) {
426
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
427
nq->stats.num_tasklet_resched++;
428
}
429
dev_dbg(&pdev->dev, "QPLIB: cqn/srqn/dbqn \n");
430
if (hw_polled >= 0)
431
dev_dbg(&pdev->dev,
432
"QPLIB: serviced %llu/%llu/%llu budget 0x%x reaped 0x%x\n",
433
nq->stats.num_cqne_processed, nq->stats.num_srqne_processed,
434
nq->stats.num_dbqne_processed, budget, hw_polled);
435
dev_dbg(&pdev->dev,
436
"QPLIB: resched_cnt = %llu arm_count = %llu\n",
437
nq->stats.num_tasklet_resched, nq->stats.num_nq_rearm);
438
spin_unlock_bh(&nq_hwq->lock);
439
}
440
441
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
442
{
443
struct bnxt_qplib_nq *nq = dev_instance;
444
struct bnxt_qplib_hwq *nq_hwq = &nq->hwq;
445
u32 sw_cons;
446
447
/* Prefetch the NQ element */
448
sw_cons = HWQ_CMP(nq_hwq->cons, nq_hwq);
449
if (sw_cons >= 0)
450
prefetch(bnxt_qplib_get_qe(nq_hwq, sw_cons, NULL));
451
452
bnxt_qplib_service_nq((unsigned long)nq);
453
454
return IRQ_HANDLED;
455
}
456
457
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
458
{
459
struct bnxt_qplib_res *res;
460
461
if (!nq->requested)
462
return;
463
464
nq->requested = false;
465
res = nq->res;
466
/* Mask h/w interrupt */
467
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, false);
468
/* Sync with last running IRQ handler */
469
synchronize_irq(nq->msix_vec);
470
free_irq(nq->msix_vec, nq);
471
kfree(nq->name);
472
nq->name = NULL;
473
}
474
475
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
476
{
477
if (nq->cqn_wq) {
478
destroy_workqueue(nq->cqn_wq);
479
nq->cqn_wq = NULL;
480
}
481
/* Make sure the HW is stopped! */
482
bnxt_qplib_nq_stop_irq(nq, true);
483
484
nq->nq_db.reg.bar_reg = NULL;
485
nq->nq_db.db = NULL;
486
487
nq->cqn_handler = NULL;
488
nq->srqn_handler = NULL;
489
nq->msix_vec = 0;
490
}
491
492
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
493
int msix_vector, bool need_init)
494
{
495
struct bnxt_qplib_res *res;
496
int rc;
497
498
res = nq->res;
499
if (nq->requested)
500
return -EFAULT;
501
502
nq->msix_vec = msix_vector;
503
nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s\n",
504
nq_indx, pci_name(res->pdev));
505
if (!nq->name)
506
return -ENOMEM;
507
rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
508
if (rc) {
509
kfree(nq->name);
510
nq->name = NULL;
511
return rc;
512
}
513
nq->requested = true;
514
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
515
516
return rc;
517
}
518
519
static void bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
520
{
521
struct bnxt_qplib_reg_desc *dbreg;
522
struct bnxt_qplib_nq_db *nq_db;
523
struct bnxt_qplib_res *res;
524
525
nq_db = &nq->nq_db;
526
res = nq->res;
527
dbreg = &res->dpi_tbl.ucreg;
528
529
nq_db->reg.bar_id = dbreg->bar_id;
530
nq_db->reg.bar_base = dbreg->bar_base;
531
nq_db->reg.bar_reg = dbreg->bar_reg + reg_offt;
532
nq_db->reg.len = _is_chip_gen_p5_p7(res->cctx) ? sizeof(u64) :
533
sizeof(u32);
534
535
nq_db->dbinfo.db = nq_db->reg.bar_reg;
536
nq_db->dbinfo.hwq = &nq->hwq;
537
nq_db->dbinfo.xid = nq->ring_id;
538
nq_db->dbinfo.seed = nq->ring_id;
539
nq_db->dbinfo.flags = 0;
540
spin_lock_init(&nq_db->dbinfo.lock);
541
nq_db->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
542
nq_db->dbinfo.res = nq->res;
543
544
return;
545
}
546
547
int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx,
548
int msix_vector, int bar_reg_offset,
549
cqn_handler_t cqn_handler,
550
srqn_handler_t srqn_handler)
551
{
552
struct pci_dev *pdev;
553
int rc;
554
555
pdev = nq->res->pdev;
556
nq->cqn_handler = cqn_handler;
557
nq->srqn_handler = srqn_handler;
558
nq->load = 0;
559
mutex_init(&nq->lock);
560
561
/* Create a workqueue task to schedule CQ notifiers in the post-send case */
562
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq\n");
563
if (!nq->cqn_wq)
564
return -ENOMEM;
565
566
bnxt_qplib_map_nq_db(nq, bar_reg_offset);
567
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
568
if (rc) {
569
dev_err(&pdev->dev,
570
"QPLIB: Failed to request irq for nq-idx %d\n", nq_idx);
571
goto fail;
572
}
573
dev_dbg(&pdev->dev, "QPLIB: NQ max = 0x%x\n", nq->hwq.max_elements);
574
575
return 0;
576
fail:
577
bnxt_qplib_disable_nq(nq);
578
return rc;
579
}
580
581
void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq)
582
{
583
if (nq->hwq.max_elements) {
584
bnxt_qplib_free_hwq(nq->res, &nq->hwq);
585
nq->hwq.max_elements = 0;
586
}
587
}
588
589
int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res,
590
struct bnxt_qplib_nq *nq)
591
{
592
struct bnxt_qplib_hwq_attr hwq_attr = {};
593
struct bnxt_qplib_sg_info sginfo = {};
594
595
nq->res = res;
596
if (!nq->hwq.max_elements ||
597
nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
598
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
599
600
sginfo.pgsize = PAGE_SIZE;
601
sginfo.pgshft = PAGE_SHIFT;
602
hwq_attr.res = res;
603
hwq_attr.sginfo = &sginfo;
604
hwq_attr.depth = nq->hwq.max_elements;
605
hwq_attr.stride = sizeof(struct nq_base);
606
hwq_attr.type = _get_hwq_type(res);
607
if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
608
dev_err(&res->pdev->dev, "QPLIB: FP NQ allocation failed\n");
609
return -ENOMEM;
610
}
611
nq->budget = 8;
612
return 0;
613
}
614
615
/* SRQ */
616
static int __qplib_destroy_srq(struct bnxt_qplib_rcfw *rcfw,
617
struct bnxt_qplib_srq *srq)
618
{
619
struct creq_destroy_srq_resp resp = {};
620
struct bnxt_qplib_cmdqmsg msg = {};
621
struct cmdq_destroy_srq req = {};
622
/* Configure the request */
623
req.srq_cid = cpu_to_le32(srq->id);
624
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_SRQ,
625
sizeof(req));
626
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
627
sizeof(resp), 0);
628
return bnxt_qplib_rcfw_send_message(rcfw, &msg);
629
}
630
631
int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
632
struct bnxt_qplib_srq *srq)
633
{
634
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
635
int rc;
636
637
rc = __qplib_destroy_srq(rcfw, srq);
638
if (rc)
639
return rc;
640
bnxt_qplib_free_hwq(res, &srq->hwq);
641
kfree(srq->swq);
642
return 0;
643
}
644
645
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
646
struct bnxt_qplib_srq *srq)
647
{
648
struct bnxt_qplib_hwq_attr hwq_attr = {};
649
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
650
struct creq_create_srq_resp resp = {};
651
struct bnxt_qplib_cmdqmsg msg = {};
652
struct cmdq_create_srq req = {};
653
u16 pg_sz_lvl = 0;
654
u16 srq_size;
655
int rc, idx;
656
657
hwq_attr.res = res;
658
hwq_attr.sginfo = &srq->sginfo;
659
hwq_attr.depth = srq->max_wqe;
660
hwq_attr.stride = srq->wqe_size;
661
hwq_attr.type = HWQ_TYPE_QUEUE;
662
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
663
if (rc)
664
goto exit;
665
/* Configure the request */
666
req.dpi = cpu_to_le32(srq->dpi->dpi);
667
req.srq_handle = cpu_to_le64((uintptr_t)srq);
668
srq_size = min_t(u32, srq->hwq.depth, U16_MAX);
669
req.srq_size = cpu_to_le16(srq_size);
670
pg_sz_lvl |= (_get_base_pg_size(&srq->hwq) <<
671
CMDQ_CREATE_SRQ_PG_SIZE_SFT);
672
pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK);
673
req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
674
req.pbl = cpu_to_le64(_get_base_addr(&srq->hwq));
675
req.pd_id = cpu_to_le32(srq->pd->id);
676
req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
677
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_SRQ,
678
sizeof(req));
679
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
680
sizeof(resp), 0);
681
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
682
if (rc)
683
goto fail;
684
if (!srq->is_user) {
685
srq->swq = kcalloc(srq->hwq.depth, sizeof(*srq->swq),
686
GFP_KERNEL);
687
if (!srq->swq)
688
goto srq_fail;
689
srq->start_idx = 0;
690
srq->last_idx = srq->hwq.depth - 1;
691
for (idx = 0; idx < srq->hwq.depth; idx++)
692
srq->swq[idx].next_idx = idx + 1;
693
srq->swq[srq->last_idx].next_idx = -1;
694
}
695
696
spin_lock_init(&srq->lock);
697
srq->id = le32_to_cpu(resp.xid);
698
srq->cctx = res->cctx;
699
srq->dbinfo.hwq = &srq->hwq;
700
srq->dbinfo.xid = srq->id;
701
srq->dbinfo.db = srq->dpi->dbr;
702
srq->dbinfo.max_slot = 1;
703
srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
704
srq->dbinfo.flags = 0;
705
spin_lock_init(&srq->dbinfo.lock);
706
srq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
707
srq->dbinfo.shadow_key_arm_ena = BNXT_QPLIB_DBR_KEY_INVALID;
708
srq->dbinfo.res = res;
709
srq->dbinfo.seed = srq->id;
710
if (srq->threshold)
711
bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
712
srq->arm_req = false;
713
return 0;
714
srq_fail:
715
__qplib_destroy_srq(rcfw, srq);
716
fail:
717
bnxt_qplib_free_hwq(res, &srq->hwq);
718
exit:
719
return rc;
720
}
721
722
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
723
struct bnxt_qplib_srq *srq)
724
{
725
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
726
u32 avail = 0;
727
728
avail = __bnxt_qplib_get_avail(srq_hwq);
729
if (avail <= srq->threshold) {
730
srq->arm_req = false;
731
bnxt_qplib_srq_arm_db(&srq->dbinfo);
732
} else {
733
/* Deferred arming */
734
srq->arm_req = true;
735
}
736
return 0;
737
}
738
739
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
740
struct bnxt_qplib_srq *srq)
741
{
742
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
743
struct creq_query_srq_resp resp = {};
744
struct bnxt_qplib_cmdqmsg msg = {};
745
struct creq_query_srq_resp_sb *sb;
746
struct bnxt_qplib_rcfw_sbuf sbuf;
747
struct cmdq_query_srq req = {};
748
int rc = 0;
749
750
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_SRQ,
751
sizeof(req));
752
sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
753
sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
754
&sbuf.dma_addr, GFP_KERNEL);
755
if (!sbuf.sb)
756
return -ENOMEM;
757
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
758
req.srq_cid = cpu_to_le32(srq->id);
759
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
760
sizeof(resp), 0);
761
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
762
/* TODO: What to do with the query? */
763
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
764
sbuf.sb, sbuf.dma_addr);
765
766
return rc;
767
}
768
769
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
770
struct bnxt_qplib_swqe *wqe)
771
{
772
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
773
struct sq_sge *hw_sge;
774
struct rq_wqe *srqe;
775
int i, rc = 0, next;
776
u32 avail;
777
778
spin_lock(&srq_hwq->lock);
779
if (srq->start_idx == srq->last_idx) {
780
dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!\n",
781
srq->id);
782
rc = -EINVAL;
783
spin_unlock(&srq_hwq->lock);
784
goto done;
785
}
786
next = srq->start_idx;
787
srq->start_idx = srq->swq[next].next_idx;
788
spin_unlock(&srq_hwq->lock);
789
790
srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
791
memset(srqe, 0, srq->wqe_size);
792
/* Calculate wqe_size and data_len */
793
for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
794
i < wqe->num_sge; i++, hw_sge++) {
795
hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
796
hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
797
hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
798
}
799
srqe->wqe_type = wqe->type;
800
srqe->flags = wqe->flags;
801
srqe->wqe_size = wqe->num_sge +
802
((offsetof(typeof(*srqe), data) + 15) >> 4);
803
if (!wqe->num_sge)
804
srqe->wqe_size++;
805
srqe->wr_id |= cpu_to_le32((u32)next);
806
srq->swq[next].wr_id = wqe->wr_id;
807
bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
808
/* Retain srq_hwq->cons for this logic; the lock is actually only
 * required to read srq_hwq->cons.
810
*/
811
spin_lock(&srq_hwq->lock);
812
avail = __bnxt_qplib_get_avail(srq_hwq);
813
spin_unlock(&srq_hwq->lock);
814
/* Ring DB */
815
bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
816
if (srq->arm_req && avail <= srq->threshold) {
817
srq->arm_req = false;
818
bnxt_qplib_srq_arm_db(&srq->dbinfo);
819
}
820
done:
821
return rc;
822
}
823
824
/* QP */
825
static int __qplib_destroy_qp(struct bnxt_qplib_rcfw *rcfw,
826
struct bnxt_qplib_qp *qp)
827
{
828
struct creq_destroy_qp_resp resp = {};
829
struct bnxt_qplib_cmdqmsg msg = {};
830
struct cmdq_destroy_qp req = {};
831
832
req.qp_cid = cpu_to_le32(qp->id);
833
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_QP,
834
sizeof(req));
835
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
836
sizeof(resp), 0);
837
return bnxt_qplib_rcfw_send_message(rcfw, &msg);
838
}
839
840
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
841
{
842
int rc = 0;
843
int indx;
844
845
que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
846
if (!que->swq) {
847
rc = -ENOMEM;
848
goto out;
849
}
850
851
que->swq_start = 0;
852
que->swq_last = que->max_sw_wqe - 1;
853
for (indx = 0; indx < que->max_sw_wqe; indx++)
854
que->swq[indx].next_idx = indx + 1;
855
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
856
que->swq_last = 0;
857
out:
858
return rc;
859
}
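/*
* The swq array wired up above is a circular list of software WQE
* bookkeeping slots: swq[i].next_idx points to i + 1, the last entry wraps
* back to index 0, and swq_start/swq_last track the two ends of the
* in-flight window.
*/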
860
861
static struct bnxt_qplib_swq *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que,
862
u32 *swq_idx)
863
{
864
u32 idx;
865
866
idx = que->swq_start;
867
if (swq_idx)
868
*swq_idx = idx;
869
return &que->swq[idx];
870
}
871
872
static void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
873
{
874
que->swq_start = que->swq[idx].next_idx;
875
}
876
877
static u32 bnxt_qplib_get_stride(void)
878
{
879
return sizeof(struct sq_sge);
880
}
881
882
u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
883
{
884
u32 slots;
885
886
/* Queue depth is the number of slots. */
887
slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
888
/* For variable WQE mode, need to align the slots to 256 */
889
if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
890
slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
891
return slots;
892
}
893
894
static u32 _set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
895
{
896
/* For Variable mode supply number of 16B slots */
897
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
898
que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
899
}
900
901
static u32 _set_sq_max_slot(u8 wqe_mode)
902
{
903
/* For static mode the index divisor is 8 */
904
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
905
sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
906
}
907
908
static u32 _set_rq_max_slot(struct bnxt_qplib_q *que)
909
{
910
return (que->wqe_size / sizeof(struct sq_sge));
911
}
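/*
* Illustrative slot arithmetic for the sizing helpers above (WQE counts
* assumed): with fixed 128-byte send WQEs and 256 requested WQEs,
* bnxt_qplib_get_depth() yields (128 * 256) / 16 = 2048 16-byte slots.
* In static WQE mode the doorbell index divisor is sizeof(struct sq_send) /
* sizeof(struct sq_sge) = 8 slots per WQE, while variable mode advances in
* single-slot units.
*/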
912
913
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
914
{
915
struct bnxt_qplib_hwq_attr hwq_attr = {};
916
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
917
struct creq_create_qp1_resp resp = {};
918
struct bnxt_qplib_cmdqmsg msg = {};
919
struct bnxt_qplib_q *sq = &qp->sq;
920
struct bnxt_qplib_q *rq = &qp->rq;
921
struct cmdq_create_qp1 req = {};
922
struct bnxt_qplib_reftbl *tbl;
923
unsigned long flag;
924
u8 pg_sz_lvl = 0;
925
u32 qp_flags = 0;
926
int rc;
927
928
/* General */
929
req.type = qp->type;
930
req.dpi = cpu_to_le32(qp->dpi->dpi);
931
req.qp_handle = cpu_to_le64(qp->qp_handle);
932
/* SQ */
933
hwq_attr.res = res;
934
hwq_attr.sginfo = &sq->sginfo;
935
hwq_attr.stride = bnxt_qplib_get_stride();
936
hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
937
hwq_attr.type = HWQ_TYPE_QUEUE;
938
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
939
if (rc)
940
goto exit;
941
942
req.sq_size = cpu_to_le32(_set_sq_size(sq, qp->wqe_mode));
943
req.sq_pbl = cpu_to_le64(_get_base_addr(&sq->hwq));
944
pg_sz_lvl = _get_base_pg_size(&sq->hwq) <<
945
CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT;
946
pg_sz_lvl |= ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK) <<
947
CMDQ_CREATE_QP1_SQ_LVL_SFT);
948
req.sq_pg_size_sq_lvl = pg_sz_lvl;
949
req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP1_SQ_FWO_SFT) &
950
CMDQ_CREATE_QP1_SQ_FWO_MASK) |
951
(sq->max_sge &
952
CMDQ_CREATE_QP1_SQ_SGE_MASK));
953
req.scq_cid = cpu_to_le32(qp->scq->id);
954
955
/* RQ */
956
if (!qp->srq) {
957
hwq_attr.res = res;
958
hwq_attr.sginfo = &rq->sginfo;
959
hwq_attr.stride = bnxt_qplib_get_stride();
960
hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
961
hwq_attr.type = HWQ_TYPE_QUEUE;
962
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
963
if (rc)
964
goto fail_sq;
965
req.rq_size = cpu_to_le32(rq->max_wqe);
966
req.rq_pbl = cpu_to_le64(_get_base_addr(&rq->hwq));
967
pg_sz_lvl = _get_base_pg_size(&rq->hwq) <<
968
CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT;
969
pg_sz_lvl |= ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
970
CMDQ_CREATE_QP1_RQ_LVL_SFT);
971
req.rq_pg_size_rq_lvl = pg_sz_lvl;
972
req.rq_fwo_rq_sge =
973
cpu_to_le16(((0 << CMDQ_CREATE_QP1_RQ_FWO_SFT) &
974
CMDQ_CREATE_QP1_RQ_FWO_MASK) |
975
(rq->max_sge &
976
CMDQ_CREATE_QP1_RQ_SGE_MASK));
977
} else {
978
/* SRQ */
979
qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED;
980
req.srq_cid = cpu_to_le32(qp->srq->id);
981
}
982
req.rcq_cid = cpu_to_le32(qp->rcq->id);
983
984
qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
985
req.qp_flags = cpu_to_le32(qp_flags);
986
req.pd_id = cpu_to_le32(qp->pd->id);
987
988
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_QP1,
989
sizeof(req));
990
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
991
sizeof(resp), 0);
992
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
993
if (rc)
994
goto fail_rq;
995
996
rc = bnxt_qplib_alloc_init_swq(sq);
997
if (rc)
998
goto sq_swq;
999
1000
if (!qp->srq) {
1001
rc = bnxt_qplib_alloc_init_swq(rq);
1002
if (rc)
1003
goto rq_swq;
1004
}
1005
1006
qp->id = le32_to_cpu(resp.xid);
1007
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1008
qp->cctx = res->cctx;
1009
sq->dbinfo.hwq = &sq->hwq;
1010
sq->dbinfo.xid = qp->id;
1011
sq->dbinfo.db = qp->dpi->dbr;
1012
sq->dbinfo.max_slot = _set_sq_max_slot(qp->wqe_mode);
1013
sq->dbinfo.flags = 0;
1014
spin_lock_init(&sq->dbinfo.lock);
1015
sq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
1016
sq->dbinfo.res = res;
1017
if (rq->max_wqe) {
1018
rq->dbinfo.hwq = &rq->hwq;
1019
rq->dbinfo.xid = qp->id;
1020
rq->dbinfo.db = qp->dpi->dbr;
1021
rq->dbinfo.max_slot = _set_rq_max_slot(rq);
1022
rq->dbinfo.flags = 0;
1023
spin_lock_init(&rq->dbinfo.lock);
1024
rq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
1025
rq->dbinfo.res = res;
1026
}
1027
1028
tbl = &res->reftbl.qpref;
1029
spin_lock_irqsave(&tbl->lock, flag);
1030
tbl->rec[tbl->max].xid = qp->id;
1031
tbl->rec[tbl->max].handle = qp;
1032
spin_unlock_irqrestore(&tbl->lock, flag);
1033
1034
return 0;
1035
rq_swq:
1036
kfree(sq->swq);
1037
sq_swq:
1038
__qplib_destroy_qp(rcfw, qp);
1039
fail_rq:
1040
bnxt_qplib_free_hwq(res, &rq->hwq);
1041
fail_sq:
1042
bnxt_qplib_free_hwq(res, &sq->hwq);
1043
exit:
1044
return rc;
1045
}
1046
1047
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
1048
{
1049
struct bnxt_qplib_hwq *sq_hwq;
1050
struct bnxt_qplib_q *sq;
1051
u64 fpsne, psn_pg;
1052
u16 indx_pad = 0;
1053
1054
sq = &qp->sq;
1055
sq_hwq = &sq->hwq;
1056
/* First psn entry */
1057
fpsne = (u64)bnxt_qplib_get_qe(sq_hwq, sq_hwq->depth, &psn_pg);
1058
if (!IS_ALIGNED(fpsne, PAGE_SIZE))
1059
indx_pad = (fpsne & ~PAGE_MASK) / size;
1060
sq_hwq->pad_pgofft = indx_pad;
1061
sq_hwq->pad_pg = (u64 *)psn_pg;
1062
sq_hwq->pad_stride = size;
1063
}
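/*
* Padding example for bnxt_qplib_init_psn_ptr() (entry size assumed): if
* the first PSN/MSN search entry starts 64 bytes into its page and each
* entry is 8 bytes, indx_pad = 64 / 8 = 8, so indexing into the search
* area that follows the SQ skips the first eight entry positions of that
* page.
*/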
1064
1065
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1066
{
1067
struct bnxt_qplib_hwq_attr hwq_attr = {};
1068
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1069
struct bnxt_qplib_sg_info sginfo = {};
1070
struct creq_create_qp_resp resp = {};
1071
struct bnxt_qplib_cmdqmsg msg = {};
1072
struct bnxt_qplib_q *sq = &qp->sq;
1073
struct bnxt_qplib_q *rq = &qp->rq;
1074
struct cmdq_create_qp req = {};
1075
struct bnxt_qplib_reftbl *tbl;
1076
struct bnxt_qplib_hwq *xrrq;
1077
int rc, req_size, psn_sz;
1078
unsigned long flag;
1079
u8 pg_sz_lvl = 0;
1080
u32 qp_flags = 0;
1081
u32 qp_idx;
1082
u16 nsge;
1083
u32 sqsz;
1084
1085
qp->cctx = res->cctx;
1086
if (res->dattr) {
1087
qp->dev_cap_flags = res->dattr->dev_cap_flags;
1088
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_ext_flags2);
1089
}
1090
1091
/* General */
1092
req.type = qp->type;
1093
req.dpi = cpu_to_le32(qp->dpi->dpi);
1094
req.qp_handle = cpu_to_le64(qp->qp_handle);
1095
1096
/* SQ */
1097
if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1098
psn_sz = _is_chip_gen_p5_p7(qp->cctx) ?
1099
sizeof(struct sq_psn_search_ext) :
1100
sizeof(struct sq_psn_search);
1101
if (qp->is_host_msn_tbl) {
1102
psn_sz = sizeof(struct sq_msn_search);
1103
qp->msn = 0;
1104
}
1105
} else {
1106
psn_sz = 0;
1107
}
1108
1109
hwq_attr.res = res;
1110
hwq_attr.sginfo = &sq->sginfo;
1111
hwq_attr.stride = bnxt_qplib_get_stride();
1112
hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
1113
hwq_attr.aux_stride = psn_sz;
1114
hwq_attr.aux_depth = (psn_sz) ?
1115
_set_sq_size(sq, qp->wqe_mode) : 0;
1116
/* Update msn tbl size */
1117
if (qp->is_host_msn_tbl && psn_sz) {
1118
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1119
hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode));
1120
else
1121
hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode)) / 2;
1122
qp->msn_tbl_sz = hwq_attr.aux_depth;
1123
qp->msn = 0;
1124
}
1125
hwq_attr.type = HWQ_TYPE_QUEUE;
1126
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
1127
if (rc)
1128
goto exit;
1129
1130
sqsz = _set_sq_size(sq, qp->wqe_mode);
1131
/* 0xffff is the maximum SQ size the hardware supports */
1132
if (sqsz > BNXT_QPLIB_MAX_SQSZ) {
1133
pr_err("QPLIB: FP: QP (0x%x) exceeds sq size %d\n", qp->id, sqsz);
1134
goto fail_sq;
1135
}
1136
req.sq_size = cpu_to_le32(sqsz);
1137
req.sq_pbl = cpu_to_le64(_get_base_addr(&sq->hwq));
1138
pg_sz_lvl = _get_base_pg_size(&sq->hwq) <<
1139
CMDQ_CREATE_QP_SQ_PG_SIZE_SFT;
1140
pg_sz_lvl |= ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK) <<
1141
CMDQ_CREATE_QP_SQ_LVL_SFT);
1142
req.sq_pg_size_sq_lvl = pg_sz_lvl;
1143
req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP_SQ_FWO_SFT) &
1144
CMDQ_CREATE_QP_SQ_FWO_MASK) |
1145
(sq->max_sge &
1146
CMDQ_CREATE_QP_SQ_SGE_MASK));
1147
req.scq_cid = cpu_to_le32(qp->scq->id);
1148
1149
/* RQ/SRQ */
1150
if (!qp->srq) {
1151
hwq_attr.res = res;
1152
hwq_attr.sginfo = &rq->sginfo;
1153
hwq_attr.stride = bnxt_qplib_get_stride();
1154
hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
1155
hwq_attr.aux_stride = 0;
1156
hwq_attr.aux_depth = 0;
1157
hwq_attr.type = HWQ_TYPE_QUEUE;
1158
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1159
if (rc)
1160
goto fail_sq;
1161
req.rq_size = cpu_to_le32(rq->max_wqe);
1162
req.rq_pbl = cpu_to_le64(_get_base_addr(&rq->hwq));
1163
pg_sz_lvl = _get_base_pg_size(&rq->hwq) <<
1164
CMDQ_CREATE_QP_RQ_PG_SIZE_SFT;
1165
pg_sz_lvl |= ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
1166
CMDQ_CREATE_QP_RQ_LVL_SFT);
1167
req.rq_pg_size_rq_lvl = pg_sz_lvl;
1168
nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1169
res->dattr->max_qp_sges : rq->max_sge;
1170
req.rq_fwo_rq_sge =
1171
cpu_to_le16(((0 << CMDQ_CREATE_QP_RQ_FWO_SFT) &
1172
CMDQ_CREATE_QP_RQ_FWO_MASK) |
1173
(nsge & CMDQ_CREATE_QP_RQ_SGE_MASK));
1174
} else {
1175
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1176
req.srq_cid = cpu_to_le32(qp->srq->id);
1177
}
1178
req.rcq_cid = cpu_to_le32(qp->rcq->id);
1179
1180
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1181
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1182
if (qp->sig_type)
1183
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1184
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1185
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1186
if (res->cctx->modes.te_bypass)
1187
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED;
1188
if (res->dattr &&
1189
bnxt_ext_stats_supported(qp->cctx, res->dattr->dev_cap_flags, res->is_vf))
1190
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1191
req.qp_flags = cpu_to_le32(qp_flags);
1192
1193
/* ORRQ and IRRQ */
1194
if (psn_sz) {
1195
xrrq = &qp->orrq;
1196
xrrq->max_elements =
1197
ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1198
req_size = xrrq->max_elements *
1199
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1200
req_size &= ~(PAGE_SIZE - 1);
1201
sginfo.pgsize = req_size;
1202
sginfo.pgshft = PAGE_SHIFT;
1203
1204
hwq_attr.res = res;
1205
hwq_attr.sginfo = &sginfo;
1206
hwq_attr.depth = xrrq->max_elements;
1207
hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1208
hwq_attr.aux_stride = 0;
1209
hwq_attr.aux_depth = 0;
1210
hwq_attr.type = HWQ_TYPE_CTX;
1211
rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1212
if (rc)
1213
goto fail_rq;
1214
req.orrq_addr = cpu_to_le64(_get_base_addr(xrrq));
1215
1216
xrrq = &qp->irrq;
1217
xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1218
qp->max_dest_rd_atomic);
1219
req_size = xrrq->max_elements *
1220
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1221
req_size &= ~(PAGE_SIZE - 1);
1222
sginfo.pgsize = req_size;
1223
hwq_attr.depth = xrrq->max_elements;
1224
hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1225
rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1226
if (rc)
1227
goto fail_orrq;
1228
req.irrq_addr = cpu_to_le64(_get_base_addr(xrrq));
1229
}
1230
req.pd_id = cpu_to_le32(qp->pd->id);
1231
1232
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_QP,
1233
sizeof(req));
1234
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1235
sizeof(resp), 0);
1236
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1237
if (rc)
1238
goto fail;
1239
1240
if (!qp->is_user) {
1241
rc = bnxt_qplib_alloc_init_swq(sq);
1242
if (rc)
1243
goto swq_sq;
1244
if (!qp->srq) {
1245
rc = bnxt_qplib_alloc_init_swq(rq);
1246
if (rc)
1247
goto swq_rq;
1248
}
1249
if (psn_sz)
1250
bnxt_qplib_init_psn_ptr(qp, psn_sz);
1251
}
1252
qp->id = le32_to_cpu(resp.xid);
1253
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1254
INIT_LIST_HEAD(&qp->sq_flush);
1255
INIT_LIST_HEAD(&qp->rq_flush);
1256
1257
sq->dbinfo.hwq = &sq->hwq;
1258
sq->dbinfo.xid = qp->id;
1259
sq->dbinfo.db = qp->dpi->dbr;
1260
sq->dbinfo.max_slot = _set_sq_max_slot(qp->wqe_mode);
1261
sq->dbinfo.flags = 0;
1262
spin_lock_init(&sq->dbinfo.lock);
1263
sq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
1264
sq->dbinfo.res = res;
1265
sq->dbinfo.seed = qp->id;
1266
if (rq->max_wqe) {
1267
rq->dbinfo.hwq = &rq->hwq;
1268
rq->dbinfo.xid = qp->id;
1269
rq->dbinfo.db = qp->dpi->dbr;
1270
rq->dbinfo.max_slot = _set_rq_max_slot(rq);
1271
rq->dbinfo.flags = 0;
1272
spin_lock_init(&rq->dbinfo.lock);
1273
rq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
1274
rq->dbinfo.res = res;
1275
rq->dbinfo.seed = qp->id;
1276
}
1277
1278
tbl = &res->reftbl.qpref;
1279
qp_idx = map_qp_id_to_tbl_indx(qp->id, tbl);
1280
spin_lock_irqsave(&tbl->lock, flag);
1281
tbl->rec[qp_idx].xid = qp->id;
1282
tbl->rec[qp_idx].handle = qp;
1283
spin_unlock_irqrestore(&tbl->lock, flag);
1284
1285
return 0;
1286
swq_rq:
1287
kfree(sq->swq);
1288
swq_sq:
1289
__qplib_destroy_qp(rcfw, qp);
1290
fail:
1291
bnxt_qplib_free_hwq(res, &qp->irrq);
1292
fail_orrq:
1293
bnxt_qplib_free_hwq(res, &qp->orrq);
1294
fail_rq:
1295
bnxt_qplib_free_hwq(res, &rq->hwq);
1296
fail_sq:
1297
bnxt_qplib_free_hwq(res, &sq->hwq);
1298
exit:
1299
return rc;
1300
}
1301
1302
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1303
{
1304
switch (qp->cur_qp_state) {
1305
case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1306
switch (qp->state) {
1307
case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1308
break;
1309
default:
1310
break;
1311
}
1312
break;
1313
case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1314
switch (qp->state) {
1315
case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1316
if (!(qp->modify_flags &
1317
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1318
qp->modify_flags |=
1319
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1320
qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1321
}
1322
qp->modify_flags &=
1323
~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1324
/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1325
if (qp->max_dest_rd_atomic < 1)
1326
qp->max_dest_rd_atomic = 1;
1327
qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1328
/* Bono FW 20.6.5 requires SGID_INDEX to be configured */
1329
if (!(qp->modify_flags &
1330
CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1331
qp->modify_flags |=
1332
CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1333
qp->ah.sgid_index = 0;
1334
}
1335
break;
1336
default:
1337
break;
1338
}
1339
break;
1340
case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1341
switch (qp->state) {
1342
case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1343
/* Bono FW requires the max_rd_atomic to be >= 1 */
1344
if (qp->max_rd_atomic < 1)
1345
qp->max_rd_atomic = 1;
1346
qp->modify_flags &=
1347
~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1348
CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1349
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1350
CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1351
CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1352
CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1353
CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1354
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1355
CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1356
CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1357
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1358
CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1359
break;
1360
default:
1361
break;
1362
}
1363
break;
1364
case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1365
break;
1366
case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1367
break;
1368
case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1369
break;
1370
case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1371
break;
1372
default:
1373
break;
1374
}
1375
}
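/*
* __filter_modify_flags() above sanitizes the caller-supplied modify mask
* per state transition: INIT->RTR supplies a default path MTU and SGID
* index when the caller omitted them and clamps max_dest_rd_atomic to at
* least 1, while RTR->RTS clamps max_rd_atomic and drops the address and
* path attributes that the firmware does not accept at that stage.
*/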
1376
1377
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1378
{
1379
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1380
struct creq_modify_qp_resp resp = {};
1381
struct bnxt_qplib_cmdqmsg msg = {};
1382
struct cmdq_modify_qp req = {};
1383
bool ppp_requested = false;
1384
u32 temp32[4];
1385
u32 bmask;
1386
int rc;
1387
1388
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MODIFY_QP,
1389
sizeof(req));
1390
1391
/* Filter out the qp_attr_mask based on the state->new transition */
1392
__filter_modify_flags(qp);
1393
bmask = qp->modify_flags;
1394
req.modify_mask = cpu_to_le32(qp->modify_flags);
1395
req.qp_cid = cpu_to_le32(qp->id);
1396
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1397
req.network_type_en_sqd_async_notify_new_state =
1398
(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1399
(qp->en_sqd_async_notify == true ?
1400
CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1401
if (__can_request_ppp(qp)) {
1402
req.path_mtu_pingpong_push_enable =
1403
CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE;
1404
req.pingpong_push_dpi = qp->ppp.dpi;
1405
ppp_requested = true;
1406
}
1407
}
1408
req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1409
1410
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) {
1411
req.access = qp->access;
1412
}
1413
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1414
req.pkey = IB_DEFAULT_PKEY_FULL;
1415
1416
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) {
1417
req.qkey = cpu_to_le32(qp->qkey);
1418
}
1419
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1420
memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1421
req.dgid[0] = cpu_to_le32(temp32[0]);
1422
req.dgid[1] = cpu_to_le32(temp32[1]);
1423
req.dgid[2] = cpu_to_le32(temp32[2]);
1424
req.dgid[3] = cpu_to_le32(temp32[3]);
1425
}
1426
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL) {
1427
req.flow_label = cpu_to_le32(qp->ah.flow_label);
1428
}
1429
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
1430
req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[qp->ah.sgid_index]);
1431
}
1432
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT) {
1433
req.hop_limit = qp->ah.hop_limit;
1434
}
1435
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS) {
1436
req.traffic_class = qp->ah.traffic_class;
1437
}
1438
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC) {
1439
memcpy(req.dest_mac, qp->ah.dmac, 6);
1440
}
1441
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU) {
1442
req.path_mtu_pingpong_push_enable = qp->path_mtu;
1443
}
1444
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT) {
1445
req.timeout = qp->timeout;
1446
}
1447
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT) {
1448
req.retry_cnt = qp->retry_cnt;
1449
}
1450
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY) {
1451
req.rnr_retry = qp->rnr_retry;
1452
}
1453
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER) {
1454
req.min_rnr_timer = qp->min_rnr_timer;
1455
}
1456
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN) {
1457
req.rq_psn = cpu_to_le32(qp->rq.psn);
1458
}
1459
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN) {
1460
req.sq_psn = cpu_to_le32(qp->sq.psn);
1461
}
1462
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC) {
1463
req.max_rd_atomic =
1464
ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1465
}
1466
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC) {
1467
req.max_dest_rd_atomic =
1468
IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1469
}
1470
req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1471
req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1472
req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1473
req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1474
req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1475
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1476
req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1477
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC)
1478
req.enable_cc = cpu_to_le16(CMDQ_MODIFY_QP_ENABLE_CC);
1479
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN)
1480
req.tos_dscp_tos_ecn =
1481
((qp->tos_ecn << CMDQ_MODIFY_QP_TOS_ECN_SFT) &
1482
CMDQ_MODIFY_QP_TOS_ECN_MASK);
1483
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP)
1484
req.tos_dscp_tos_ecn |=
1485
((qp->tos_dscp << CMDQ_MODIFY_QP_TOS_DSCP_SFT) &
1486
CMDQ_MODIFY_QP_TOS_DSCP_MASK);
1487
req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1488
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1489
sizeof(resp), 0);
1490
msg.qp_state = qp->state;
1491
1492
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1493
if (rc == -ETIMEDOUT && (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR)) {
1494
qp->cur_qp_state = qp->state;
1495
return 0;
1496
} else if (rc) {
1497
return rc;
1498
}
1499
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR)
1500
qp->lag_src_mac = be32_to_cpu(resp.lag_src_mac);
1501
1502
if (ppp_requested)
1503
qp->ppp.st_idx_en = resp.pingpong_push_state_index_enabled;
1504
1505
qp->cur_qp_state = qp->state;
1506
return 0;
1507
}
1508
1509
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1510
{
1511
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1512
struct creq_query_qp_resp resp = {};
1513
struct bnxt_qplib_cmdqmsg msg = {};
1514
struct bnxt_qplib_rcfw_sbuf sbuf;
1515
struct creq_query_qp_resp_sb *sb;
1516
struct cmdq_query_qp req = {};
1517
u32 temp32[4];
1518
int i, rc;
1519
1520
sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
1521
sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
1522
&sbuf.dma_addr, GFP_KERNEL);
1523
if (!sbuf.sb)
1524
return -ENOMEM;
1525
sb = sbuf.sb;
1526
1527
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_QP,
1528
sizeof(req));
1529
req.qp_cid = cpu_to_le32(qp->id);
1530
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
1531
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
1532
sizeof(resp), 0);
1533
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1534
if (rc)
1535
goto bail;
1536
1537
/* Extract the context from the side buffer */
1538
qp->state = sb->en_sqd_async_notify_state &
1539
CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1540
qp->cur_qp_state = qp->state;
1541
qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1542
CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1543
true : false;
1544
qp->access = sb->access;
1545
qp->pkey_index = le16_to_cpu(sb->pkey);
1546
qp->qkey = le32_to_cpu(sb->qkey);
1547
1548
temp32[0] = le32_to_cpu(sb->dgid[0]);
1549
temp32[1] = le32_to_cpu(sb->dgid[1]);
1550
temp32[2] = le32_to_cpu(sb->dgid[2]);
1551
temp32[3] = le32_to_cpu(sb->dgid[3]);
1552
memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1553
1554
qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1555
1556
qp->ah.sgid_index = 0;
1557
for (i = 0; i < res->sgid_tbl.max; i++) {
1558
if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1559
qp->ah.sgid_index = i;
1560
break;
1561
}
1562
}
1563
if (i == res->sgid_tbl.max)
1564
dev_dbg(&res->pdev->dev,
1565
"QPLIB: SGID not found qp->id = 0x%x sgid_index = 0x%x\n",
1566
qp->id, le16_to_cpu(sb->sgid_index));
1567
1568
qp->ah.hop_limit = sb->hop_limit;
1569
qp->ah.traffic_class = sb->traffic_class;
1570
memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN);
1571
qp->ah.vlan_id = le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1572
CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK >>
1573
CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1574
qp->path_mtu = le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1575
CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK;
1576
qp->timeout = sb->timeout;
1577
qp->retry_cnt = sb->retry_cnt;
1578
qp->rnr_retry = sb->rnr_retry;
1579
qp->min_rnr_timer = sb->min_rnr_timer;
1580
qp->rq.psn = le32_to_cpu(sb->rq_psn);
1581
qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1582
qp->sq.psn = le32_to_cpu(sb->sq_psn);
1583
qp->max_dest_rd_atomic =
1584
IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1585
qp->sq.max_wqe = qp->sq.hwq.max_elements;
1586
qp->rq.max_wqe = qp->rq.hwq.max_elements;
1587
qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1588
qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1589
qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1590
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1591
memcpy(qp->smac, sb->src_mac, ETH_ALEN);
1592
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1593
qp->port_id = le16_to_cpu(sb->port_id);
1594
bail:
1595
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
1596
sbuf.sb, sbuf.dma_addr);
1597
return rc;
1598
}
1599
1600
1601
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1602
{
1603
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1604
u32 peek_flags, peek_cons;
1605
struct cq_base *hw_cqe;
1606
int i;
1607
1608
peek_flags = cq->dbinfo.flags;
1609
peek_cons = cq_hwq->cons;
1610
for (i = 0; i < cq_hwq->depth; i++) {
1611
hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
1612
if (CQE_CMP_VALID(hw_cqe, peek_flags)) {
1613
dma_rmb();
1614
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1615
case CQ_BASE_CQE_TYPE_REQ:
1616
case CQ_BASE_CQE_TYPE_TERMINAL:
1617
{
1618
struct cq_req *cqe = (struct cq_req *)hw_cqe;
1619
1620
if (qp == le64_to_cpu(cqe->qp_handle))
1621
cqe->qp_handle = 0;
1622
break;
1623
}
1624
case CQ_BASE_CQE_TYPE_RES_RC:
1625
case CQ_BASE_CQE_TYPE_RES_UD:
1626
case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1627
{
1628
struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1629
1630
if (qp == le64_to_cpu(cqe->qp_handle))
1631
cqe->qp_handle = 0;
1632
break;
1633
}
1634
default:
1635
break;
1636
}
1637
}
1638
bnxt_qplib_hwq_incr_cons(cq_hwq->depth, &peek_cons,
1639
1, &peek_flags);
1640
}
1641
}
1642
1643
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1644
struct bnxt_qplib_qp *qp)
1645
{
1646
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1647
struct bnxt_qplib_reftbl *tbl;
1648
unsigned long flags;
1649
u32 qp_idx;
1650
int rc;
1651
1652
tbl = &res->reftbl.qpref;
1653
qp_idx = map_qp_id_to_tbl_indx(qp->id, tbl);
1654
spin_lock_irqsave(&tbl->lock, flags);
1655
tbl->rec[qp_idx].xid = BNXT_QPLIB_QP_ID_INVALID;
1656
tbl->rec[qp_idx].handle = NULL;
1657
spin_unlock_irqrestore(&tbl->lock, flags);
1658
1659
rc = __qplib_destroy_qp(rcfw, qp);
1660
if (rc) {
1661
spin_lock_irqsave(&tbl->lock, flags);
1662
tbl->rec[qp_idx].xid = qp->id;
1663
tbl->rec[qp_idx].handle = qp;
1664
spin_unlock_irqrestore(&tbl->lock, flags);
1665
return rc;
1666
}
1667
1668
return 0;
1669
}
1670
1671
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1672
struct bnxt_qplib_qp *qp)
1673
{
1674
if (qp->irrq.max_elements)
1675
bnxt_qplib_free_hwq(res, &qp->irrq);
1676
if (qp->orrq.max_elements)
1677
bnxt_qplib_free_hwq(res, &qp->orrq);
1678
1679
if (!qp->is_user)
1680
kfree(qp->rq.swq);
1681
bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1682
1683
if (!qp->is_user)
1684
kfree(qp->sq.swq);
1685
bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1686
}
1687
1688
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1689
struct bnxt_qplib_sge *sge)
1690
{
1691
struct bnxt_qplib_q *sq = &qp->sq;
1692
struct bnxt_qplib_hdrbuf *buf;
1693
u32 sw_prod;
1694
1695
memset(sge, 0, sizeof(*sge));
1696
1697
buf = qp->sq_hdr_buf;
1698
if (buf) {
1699
sw_prod = sq->swq_start;
1700
sge->addr = (dma_addr_t)(buf->dma_map + sw_prod * buf->step);
1701
sge->lkey = 0xFFFFFFFF;
1702
sge->size = buf->step;
1703
return buf->va + sw_prod * sge->size;
1704
}
1705
return NULL;
1706
}
1707
1708
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1709
{
1710
struct bnxt_qplib_q *rq = &qp->rq;
1711
1712
return rq->swq_start;
1713
}
1714
1715
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1716
struct bnxt_qplib_sge *sge)
1717
{
1718
struct bnxt_qplib_q *rq = &qp->rq;
1719
struct bnxt_qplib_hdrbuf *buf;
1720
u32 sw_prod;
1721
1722
memset(sge, 0, sizeof(*sge));
1723
1724
buf = qp->rq_hdr_buf;
1725
if (buf) {
1726
sw_prod = rq->swq_start;
1727
sge->addr = (dma_addr_t)(buf->dma_map + sw_prod * buf->step);
1728
sge->lkey = 0xFFFFFFFF;
1729
sge->size = buf->step;
1730
return buf->va + sw_prod * sge->size;
1731
}
1732
return NULL;
1733
}
1734
1735
/* Fill the MSN table entry into the next PSN row */
1736
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1737
struct bnxt_qplib_swqe *wqe,
1738
struct bnxt_qplib_swq *swq)
1739
{
1740
struct sq_msn_search *msns;
1741
u32 start_psn, next_psn;
1742
u16 start_idx;
1743
1744
msns = (struct sq_msn_search *)swq->psn_search;
1745
msns->start_idx_next_psn_start_psn = 0;
1746
1747
start_psn = swq->start_psn;
1748
next_psn = swq->next_psn;
1749
start_idx = swq->slot_idx;
1750
msns->start_idx_next_psn_start_psn |=
1751
bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
1752
pr_debug("QP_LIB MSN %d START_IDX %u NEXT_PSN %u START_PSN %u\n",
1753
qp->msn,
1754
(u16)
1755
cpu_to_le16(BNXT_RE_MSN_IDX(msns->start_idx_next_psn_start_psn)),
1756
(u32)
1757
cpu_to_le32(BNXT_RE_MSN_NPSN(msns->start_idx_next_psn_start_psn)),
1758
(u32)
1759
cpu_to_le32(BNXT_RE_MSN_SPSN(msns->start_idx_next_psn_start_psn)));
1760
qp->msn++;
1761
qp->msn %= qp->msn_tbl_sz;
1762
}
1763
1764
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1765
struct bnxt_qplib_swqe *wqe,
1766
struct bnxt_qplib_swq *swq)
1767
{
1768
struct sq_psn_search_ext *psns_ext;
1769
struct sq_psn_search *psns;
1770
u32 flg_npsn;
1771
u32 op_spsn;
1772
1773
if (!swq->psn_search)
1774
return;
1775
1776
/* Handle MSN differently based on the cap flags */
1777
if (qp->is_host_msn_tbl) {
1778
bnxt_qplib_fill_msn_search(qp, wqe, swq);
1779
return;
1780
}
1781
psns = (struct sq_psn_search *)swq->psn_search;
1782
psns_ext = (struct sq_psn_search_ext *)swq->psn_search;
1783
1784
op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1785
SQ_PSN_SEARCH_START_PSN_MASK);
1786
op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1787
SQ_PSN_SEARCH_OPCODE_MASK);
1788
flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1789
SQ_PSN_SEARCH_NEXT_PSN_MASK);
1790
1791
if (_is_chip_gen_p5_p7(qp->cctx)) {
1792
psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1793
psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1794
psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1795
} else {
1796
psns->opcode_start_psn = cpu_to_le32(op_spsn);
1797
psns->flags_next_psn = cpu_to_le32(flg_npsn);
1798
}
1799
}
1800
1801
static u16 _calc_ilsize(struct bnxt_qplib_swqe *wqe)
1802
{
1803
u16 size = 0;
1804
int indx;
1805
1806
for (indx = 0; indx < wqe->num_sge; indx++)
1807
size += wqe->sg_list[indx].size;
1808
return size;
1809
}
1810
1811
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1812
struct bnxt_qplib_swqe *wqe,
1813
u32 *sw_prod)
1814
{
1815
struct bnxt_qplib_hwq *sq_hwq;
1816
int len, t_len, offt = 0;
1817
int t_cplen = 0, cplen;
1818
bool pull_dst = true;
1819
void *il_dst = NULL;
1820
void *il_src = NULL;
1821
int indx;
1822
1823
sq_hwq = &qp->sq.hwq;
1824
t_len = 0;
1825
for (indx = 0; indx < wqe->num_sge; indx++) {
1826
len = wqe->sg_list[indx].size;
1827
il_src = (void *)wqe->sg_list[indx].addr;
1828
t_len += len;
1829
if (t_len > qp->max_inline_data)
1830
goto bad;
1831
while (len) {
1832
if (pull_dst) {
1833
pull_dst = false;
1834
il_dst = bnxt_qplib_get_qe(sq_hwq, ((*sw_prod) %
1835
sq_hwq->depth), NULL);
1836
(*sw_prod)++;
1837
t_cplen = 0;
1838
offt = 0;
1839
}
1840
cplen = min_t(int, len, sizeof(struct sq_sge));
1841
cplen = min_t(int, cplen,
1842
(sizeof(struct sq_sge) - offt));
1843
memcpy(il_dst, il_src, cplen);
1844
t_cplen += cplen;
1845
il_src += cplen;
1846
il_dst += cplen;
1847
offt += cplen;
1848
len -= cplen;
1849
if (t_cplen == sizeof(struct sq_sge))
1850
pull_dst = true;
1851
}
1852
}
1853
1854
return t_len;
1855
bad:
1856
return -ENOMEM;
1857
}
1858
1859
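/* Copy the caller's SGE list into consecutive queue slots and return the
 * total data length covered by the SGEs.
 */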
static int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *sq_hwq,
1860
struct bnxt_qplib_sge *ssge,
1861
u32 nsge, u32 *sw_prod)
1862
{
1863
struct sq_sge *dsge;
1864
int indx, len = 0;
1865
1866
for (indx = 0; indx < nsge; indx++, (*sw_prod)++) {
1867
dsge = bnxt_qplib_get_qe(sq_hwq, ((*sw_prod) % sq_hwq->depth), NULL);
1868
dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1869
dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1870
dsge->size = cpu_to_le32(ssge[indx].size);
1871
len += ssge[indx].size;
1872
}
1873
return len;
1874
}
1875
1876
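/* Work out the WQE footprint: the header slot plus either the inline
 * payload (capped at max_inline_data) or the SGE list. Returns the size in
 * sq_sge-sized slots and, optionally, the size in bytes through wqe_byte.
 */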
static u16 _calculate_wqe_byte(struct bnxt_qplib_qp *qp,
1877
struct bnxt_qplib_swqe *wqe, u16 *wqe_byte)
1878
{
1879
u16 wqe_size;
1880
u32 ilsize;
1881
u16 nsge;
1882
1883
nsge = wqe->num_sge;
1884
if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1885
ilsize = _calc_ilsize(wqe);
1886
wqe_size = (ilsize > qp->max_inline_data) ?
1887
qp->max_inline_data : ilsize;
1888
wqe_size = ALIGN(wqe_size, sizeof(struct sq_sge));
1889
} else {
1890
wqe_size = nsge * sizeof(struct sq_sge);
1891
}
1892
/* Using sq_send_hdr here is a slight misnomer; the RQ header is the same size. */
1893
wqe_size += sizeof(struct sq_send_hdr);
1894
if (wqe_byte)
1895
*wqe_byte = wqe_size;
1896
return wqe_size / sizeof(struct sq_sge);
1897
}
1898
1899
static u16 _translate_q_full_delta(struct bnxt_qplib_q *que, u16 wqe_bytes)
1900
{
1901
/* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128
1902
* For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
1903
* For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512.
1904
* when 8916 is disabled.
1905
*/
1906
return (que->q_full_delta * wqe_bytes) / que->hwq.element_size;
1907
}
1908
1909
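/* Point swq->psn_search at the PSN/MSN search entry for this WQE. The
 * entries live in the pad pages placed right after the SQ; for the host
 * MSN table the index is qp->msn, otherwise it is derived from the WQE's
 * slot index.
 */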
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1910
struct bnxt_qplib_swq *swq, bool is_host_msn_tbl)
1911
{
1912
struct bnxt_qplib_hwq *sq_hwq;
1913
u32 pg_num, pg_indx;
1914
void *buff;
1915
u32 tail;
1916
1917
sq_hwq = &sq->hwq;
1918
if (!sq_hwq->pad_pg)
1919
return;
1920
1921
tail = swq->slot_idx / sq->dbinfo.max_slot;
1922
if (is_host_msn_tbl) {
1923
/* For HW retx use qp msn index */
1924
tail = qp->msn;
1925
tail %= qp->msn_tbl_sz;
1926
}
1927
pg_num = (tail + sq_hwq->pad_pgofft) / (PAGE_SIZE / sq_hwq->pad_stride);
1928
pg_indx = (tail + sq_hwq->pad_pgofft) % (PAGE_SIZE / sq_hwq->pad_stride);
1929
buff = (void *)(sq_hwq->pad_pg[pg_num] + pg_indx * sq_hwq->pad_stride);
1930
/* the start pointer for buff is the same, i.e. right after the SQ */
1931
swq->psn_search = buff;
1932
}
1933
1934
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1935
{
1936
struct bnxt_qplib_q *sq = &qp->sq;
1937
1938
bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1939
}
1940
1941
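/* Build and post one send WQE: validate the QP state, check for queue
 * full, stage the base and extended headers, copy inline data or SGEs,
 * fill the opcode-specific fields, update the PSN/MSN search entry and
 * advance the producer. The doorbell is not rung here; a caller would
 * typically do something like the following (sketch only):
 *
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */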
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1942
struct bnxt_qplib_swqe *wqe)
1943
{
1944
struct bnxt_qplib_nq_work *nq_work = NULL;
1945
int i, rc = 0, data_len = 0, pkt_num = 0;
1946
struct bnxt_qplib_q *sq = &qp->sq;
1947
struct bnxt_qplib_hwq *sq_hwq;
1948
struct bnxt_qplib_swq *swq;
1949
bool sch_handler = false;
1950
u16 slots_needed;
1951
bool msn_update;
1952
void *base_hdr;
1953
void *ext_hdr;
1954
__le32 temp32;
1955
u16 qfd_slots;
1956
u8 wqe_slots;
1957
u16 wqe_size;
1958
u32 sw_prod;
1959
u32 wqe_idx;
1960
1961
sq_hwq = &sq->hwq;
1962
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1963
qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1964
dev_err(&sq_hwq->pdev->dev,
1965
"QPLIB: FP: QP (0x%x) is in the 0x%x state\n",
1966
qp->id, qp->state);
1967
rc = -EINVAL;
1968
goto done;
1969
}
1970
1971
wqe_slots = _calculate_wqe_byte(qp, wqe, &wqe_size);
1972
slots_needed = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1973
sq->dbinfo.max_slot : wqe_slots;
1974
qfd_slots = _translate_q_full_delta(sq, wqe_size);
1975
if (bnxt_qplib_queue_full(sq_hwq, (slots_needed + qfd_slots))) {
1976
dev_err(&sq_hwq->pdev->dev,
1977
"QPLIB: FP: QP (0x%x) SQ is full!\n", qp->id);
1978
dev_err(&sq_hwq->pdev->dev,
1979
"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x slots = %#x\n",
1980
HWQ_CMP(sq_hwq->prod, sq_hwq),
1981
HWQ_CMP(sq_hwq->cons, sq_hwq),
1982
sq_hwq->max_elements, qfd_slots, slots_needed);
1983
dev_err(&sq_hwq->pdev->dev,
1984
"QPLIB: phantom_wqe_cnt: %d phantom_cqe_cnt: %d\n",
1985
sq->phantom_wqe_cnt, sq->phantom_cqe_cnt);
1986
rc = -ENOMEM;
1987
goto done;
1988
}
1989
1990
sw_prod = sq_hwq->prod;
1991
swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1992
swq->slot_idx = sw_prod;
1993
bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1994
1995
swq->wr_id = wqe->wr_id;
1996
swq->type = wqe->type;
1997
swq->flags = wqe->flags;
1998
swq->slots = slots_needed;
1999
swq->start_psn = sq->psn & BTH_PSN_MASK;
2000
if (qp->sig_type || wqe->flags & BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP)
2001
swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
2002
2003
dev_dbg(&sq_hwq->pdev->dev,
2004
"QPLIB: FP: QP(0x%x) post SQ wr_id[%d] = 0x%llx\n",
2005
qp->id, wqe_idx, swq->wr_id);
2006
if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2007
sch_handler = true;
2008
dev_dbg(&sq_hwq->pdev->dev,
2009
"%s Error QP. Scheduling for poll_cq\n", __func__);
2010
goto queue_err;
2011
}
2012
2013
base_hdr = bnxt_qplib_get_qe(sq_hwq, sw_prod, NULL);
2014
sw_prod++;
2015
ext_hdr = bnxt_qplib_get_qe(sq_hwq, (sw_prod % sq_hwq->depth), NULL);
2016
sw_prod++;
2017
memset(base_hdr, 0, sizeof(struct sq_sge));
2018
memset(ext_hdr, 0, sizeof(struct sq_sge));
2019
2020
if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
2021
data_len = bnxt_qplib_put_inline(qp, wqe, &sw_prod);
2022
else
2023
data_len = bnxt_qplib_put_sges(sq_hwq, wqe->sg_list,
2024
wqe->num_sge, &sw_prod);
2025
if (data_len < 0)
2026
goto queue_err;
2027
/* Make sure we update MSN table only for wired wqes */
2028
msn_update = true;
2029
2030
/* Specifics */
2031
switch (wqe->type) {
2032
case BNXT_QPLIB_SWQE_TYPE_SEND:
2033
if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE ||
2034
qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
2035
/* Assemble info for Raw Ethertype QPs */
2036
struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
2037
struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
2038
2039
sqe->wqe_type = wqe->type;
2040
sqe->flags = wqe->flags;
2041
sqe->wqe_size = wqe_slots;
2042
sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
2043
sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
2044
sqe->length = cpu_to_le32(data_len);
2045
ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
2046
SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
2047
SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
2048
2049
dev_dbg(&sq_hwq->pdev->dev,
2050
"QPLIB: FP: RAW/QP1 Send WQE:\n"
2051
"\twqe_type = 0x%x\n"
2052
"\tflags = 0x%x\n"
2053
"\twqe_size = 0x%x\n"
2054
"\tlflags = 0x%x\n"
2055
"\tcfa_action = 0x%x\n"
2056
"\tlength = 0x%x\n"
2057
"\tcfa_meta = 0x%x\n",
2058
sqe->wqe_type, sqe->flags, sqe->wqe_size,
2059
sqe->lflags, sqe->cfa_action,
2060
sqe->length, ext_sqe->cfa_meta);
2061
break;
2062
}
2063
fallthrough;
2064
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2065
fallthrough;
2066
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2067
{
2068
struct sq_send_hdr *sqe = base_hdr;
2069
struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
2070
2071
sqe->wqe_type = wqe->type;
2072
sqe->flags = wqe->flags;
2073
sqe->wqe_size = wqe_slots;
2074
sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
2075
if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
2076
qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
2077
sqe->q_key = cpu_to_le32(wqe->send.q_key);
2078
sqe->length = cpu_to_le32(data_len);
2079
ext_sqe->dst_qp = cpu_to_le32(
2080
wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
2081
ext_sqe->avid = cpu_to_le32(wqe->send.avid &
2082
SQ_SEND_AVID_MASK);
2083
sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
2084
msn_update = false;
2085
} else {
2086
sqe->length = cpu_to_le32(data_len);
2087
if (qp->mtu)
2088
pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2089
if (!pkt_num)
2090
pkt_num = 1;
2091
sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2092
}
2093
dev_dbg(&sq_hwq->pdev->dev,
2094
"QPLIB: FP: Send WQE:\n"
2095
"\twqe_type = 0x%x\n"
2096
"\tflags = 0x%x\n"
2097
"\twqe_size = 0x%x\n"
2098
"\tinv_key/immdata = 0x%x\n"
2099
"\tq_key = 0x%x\n"
2100
"\tdst_qp = 0x%x\n"
2101
"\tlength = 0x%x\n"
2102
"\tavid = 0x%x\n",
2103
sqe->wqe_type, sqe->flags, sqe->wqe_size,
2104
sqe->inv_key_or_imm_data, sqe->q_key, ext_sqe->dst_qp,
2105
sqe->length, ext_sqe->avid);
2106
break;
2107
}
2108
case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2109
/* fall-thru */
2110
case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2111
/* fall-thru */
2112
case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2113
{
2114
struct sq_rdma_hdr *sqe = base_hdr;
2115
struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
2116
2117
sqe->wqe_type = wqe->type;
2118
sqe->flags = wqe->flags;
2119
sqe->wqe_size = wqe_slots;
2120
sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
2121
sqe->length = cpu_to_le32((u32)data_len);
2122
ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
2123
ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
2124
if (qp->mtu)
2125
pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2126
if (!pkt_num)
2127
pkt_num = 1;
2128
sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2129
2130
dev_dbg(&sq_hwq->pdev->dev,
2131
"QPLIB: FP: RDMA WQE:\n"
2132
"\twqe_type = 0x%x\n"
2133
"\tflags = 0x%x\n"
2134
"\twqe_size = 0x%x\n"
2135
"\timmdata = 0x%x\n"
2136
"\tlength = 0x%x\n"
2137
"\tremote_va = 0x%llx\n"
2138
"\tremote_key = 0x%x\n",
2139
sqe->wqe_type, sqe->flags, sqe->wqe_size,
2140
sqe->imm_data, sqe->length, ext_sqe->remote_va,
2141
ext_sqe->remote_key);
2142
break;
2143
}
2144
case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2145
/* fall-thru */
2146
case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2147
{
2148
struct sq_atomic_hdr *sqe = base_hdr;
2149
struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
2150
2151
sqe->wqe_type = wqe->type;
2152
sqe->flags = wqe->flags;
2153
sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
2154
sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
2155
ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
2156
ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
2157
if (qp->mtu)
2158
pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2159
if (!pkt_num)
2160
pkt_num = 1;
2161
sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2162
break;
2163
}
2164
case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2165
{
2166
struct sq_localinvalidate_hdr *sqe = base_hdr;
2167
2168
sqe->wqe_type = wqe->type;
2169
sqe->flags = wqe->flags;
2170
sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
2171
2172
dev_dbg(&sq_hwq->pdev->dev,
2173
"QPLIB: FP: LOCAL INV WQE:\n"
2174
"\twqe_type = 0x%x\n"
2175
"\tflags = 0x%x\n"
2176
"\tinv_l_key = 0x%x\n",
2177
sqe->wqe_type, sqe->flags, sqe->inv_l_key);
2178
msn_update = false;
2179
break;
2180
}
2181
case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
2182
{
2183
struct sq_fr_pmr_hdr *sqe = base_hdr;
2184
struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
2185
2186
sqe->wqe_type = wqe->type;
2187
sqe->flags = wqe->flags;
2188
sqe->access_cntl = wqe->frmr.access_cntl |
2189
SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2190
sqe->zero_based_page_size_log =
2191
(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
2192
SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
2193
(wqe->frmr.zero_based == true ? SQ_FR_PMR_ZERO_BASED : 0);
2194
sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
2195
/* TODO: OFED only provides length of MR up to 32-bits for FRMR */
2196
temp32 = cpu_to_le32(wqe->frmr.length);
2197
memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
2198
sqe->numlevels_pbl_page_size_log =
2199
((wqe->frmr.pbl_pg_sz_log <<
2200
SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
2201
SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
2202
((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
2203
SQ_FR_PMR_NUMLEVELS_MASK);
2204
if (!wqe->frmr.levels && !wqe->frmr.pbl_ptr) {
2205
ext_sqe->pblptr = cpu_to_le64(wqe->frmr.page_list[0]);
2206
} else {
2207
for (i = 0; i < wqe->frmr.page_list_len; i++)
2208
wqe->frmr.pbl_ptr[i] = cpu_to_le64(
2209
wqe->frmr.page_list[i] |
2210
PTU_PTE_VALID);
2211
ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
2212
}
2213
ext_sqe->va = cpu_to_le64(wqe->frmr.va);
2214
dev_dbg(&sq_hwq->pdev->dev,
2215
"QPLIB: FP: FRMR WQE:\n"
2216
"\twqe_type = 0x%x\n"
2217
"\tflags = 0x%x\n"
2218
"\taccess_cntl = 0x%x\n"
2219
"\tzero_based_page_size_log = 0x%x\n"
2220
"\tl_key = 0x%x\n"
2221
"\tlength = 0x%x\n"
2222
"\tnumlevels_pbl_page_size_log = 0x%x\n"
2223
"\tpblptr = 0x%llx\n"
2224
"\tva = 0x%llx\n",
2225
sqe->wqe_type, sqe->flags, sqe->access_cntl,
2226
sqe->zero_based_page_size_log, sqe->l_key,
2227
*(u32 *)sqe->length, sqe->numlevels_pbl_page_size_log,
2228
ext_sqe->pblptr, ext_sqe->va);
2229
msn_update = false;
2230
break;
2231
}
2232
case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2233
{
2234
struct sq_bind_hdr *sqe = base_hdr;
2235
struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2236
2237
sqe->wqe_type = wqe->type;
2238
sqe->flags = wqe->flags;
2239
sqe->access_cntl = wqe->bind.access_cntl;
2240
sqe->mw_type_zero_based = wqe->bind.mw_type |
2241
(wqe->bind.zero_based == true ? SQ_BIND_ZERO_BASED : 0);
2242
sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2243
sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2244
ext_sqe->va = cpu_to_le64(wqe->bind.va);
2245
ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2246
dev_dbg(&sq_hwq->pdev->dev,
2247
"QPLIB: FP: BIND WQE:\n"
2248
"\twqe_type = 0x%x\n"
2249
"\tflags = 0x%x\n"
2250
"\taccess_cntl = 0x%x\n"
2251
"\tmw_type_zero_based = 0x%x\n"
2252
"\tparent_l_key = 0x%x\n"
2253
"\tl_key = 0x%x\n"
2254
"\tva = 0x%llx\n"
2255
"\tlength = 0x%x\n",
2256
sqe->wqe_type, sqe->flags, sqe->access_cntl,
2257
sqe->mw_type_zero_based, sqe->parent_l_key,
2258
sqe->l_key, sqe->va, ext_sqe->length_lo);
2259
msn_update = false;
2260
break;
2261
}
2262
default:
2263
/* Bad wqe, return error */
2264
rc = -EINVAL;
2265
goto done;
2266
}
2267
if (!qp->is_host_msn_tbl || msn_update) {
2268
swq->next_psn = sq->psn & BTH_PSN_MASK;
2269
bnxt_qplib_fill_psn_search(qp, wqe, swq);
2270
}
2271
2272
queue_err:
2273
bnxt_qplib_swq_mod_start(sq, wqe_idx);
2274
bnxt_qplib_hwq_incr_prod(&sq->dbinfo, sq_hwq, swq->slots);
2275
qp->wqe_cnt++;
2276
done:
2277
if (sch_handler) {
2278
nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2279
if (nq_work) {
2280
nq_work->cq = qp->scq;
2281
nq_work->nq = qp->scq->nq;
2282
INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2283
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2284
} else {
2285
dev_err(&sq->hwq.pdev->dev,
2286
"QPLIB: FP: Failed to allocate SQ nq_work!\n");
2287
rc = -ENOMEM;
2288
}
2289
}
2290
return rc;
2291
}
2292
2293
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2294
{
2295
struct bnxt_qplib_q *rq = &qp->rq;
2296
2297
bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2298
}
2299
2300
void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq)
2301
{
2302
struct bnxt_qplib_nq *nq;
2303
2304
if (!(cq && cq->nq))
2305
return;
2306
2307
nq = cq->nq;
2308
spin_lock_bh(&cq->compl_lock);
2309
if (nq->cqn_handler) {
2310
dev_dbg(&nq->res->pdev->dev,
2311
"%s:Trigger cq = %p event nq = %p\n",
2312
__func__, cq, nq);
2313
nq->cqn_handler(nq, cq);
2314
}
2315
spin_unlock_bh(&cq->compl_lock);
2316
}
2317
2318
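/* Post one receive WQE: stage the RQ header pair, copy the SGEs (or a
 * single zero-length SGE when num_sge is 0) and advance the producer. As
 * with the send path, the doorbell is rung separately; a caller would
 * typically do something like the following (sketch only):
 *
 *	rc = bnxt_qplib_post_recv(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_recv_db(qp);
 */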
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2319
struct bnxt_qplib_swqe *wqe)
2320
{
2321
struct bnxt_qplib_nq_work *nq_work = NULL;
2322
struct bnxt_qplib_q *rq = &qp->rq;
2323
struct bnxt_qplib_hwq *rq_hwq;
2324
struct bnxt_qplib_swq *swq;
2325
bool sch_handler = false;
2326
struct rq_wqe_hdr *base_hdr;
2327
struct rq_ext_hdr *ext_hdr;
2328
struct sq_sge *dsge;
2329
u8 wqe_slots;
2330
u32 wqe_idx;
2331
u32 sw_prod;
2332
int rc = 0;
2333
2334
rq_hwq = &rq->hwq;
2335
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2336
dev_err(&rq_hwq->pdev->dev,
2337
"QPLIB: FP: QP (0x%x) is in the 0x%x state\n",
2338
qp->id, qp->state);
2339
rc = -EINVAL;
2340
goto done;
2341
}
2342
2343
wqe_slots = _calculate_wqe_byte(qp, wqe, NULL);
2344
if (bnxt_qplib_queue_full(rq_hwq, rq->dbinfo.max_slot)) {
2345
dev_err(&rq_hwq->pdev->dev,
2346
"QPLIB: FP: QP (0x%x) RQ is full!\n", qp->id);
2347
rc = -EINVAL;
2348
goto done;
2349
}
2350
2351
swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2352
swq->wr_id = wqe->wr_id;
2353
swq->slots = rq->dbinfo.max_slot;
2354
dev_dbg(&rq_hwq->pdev->dev,
2355
"QPLIB: FP: post RQ wr_id[%d] = 0x%llx\n",
2356
wqe_idx, swq->wr_id);
2357
if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2358
sch_handler = true;
2359
dev_dbg(&rq_hwq->pdev->dev, "%s Error QP. Sched a flushed cmpl\n",
2360
__func__);
2361
goto queue_err;
2362
}
2363
2364
sw_prod = rq_hwq->prod;
2365
base_hdr = bnxt_qplib_get_qe(rq_hwq, sw_prod, NULL);
2366
sw_prod++;
2367
ext_hdr = bnxt_qplib_get_qe(rq_hwq, (sw_prod % rq_hwq->depth), NULL);
2368
sw_prod++;
2369
memset(base_hdr, 0, sizeof(struct sq_sge));
2370
memset(ext_hdr, 0, sizeof(struct sq_sge));
2371
2372
if (!wqe->num_sge) {
2373
dsge = bnxt_qplib_get_qe(rq_hwq, (sw_prod % rq_hwq->depth), NULL);
2374
dsge->size = 0;
2375
wqe_slots++;
2376
} else {
2377
bnxt_qplib_put_sges(rq_hwq, wqe->sg_list, wqe->num_sge, &sw_prod);
2378
}
2379
base_hdr->wqe_type = wqe->type;
2380
base_hdr->flags = wqe->flags;
2381
base_hdr->wqe_size = wqe_slots;
2382
base_hdr->wr_id |= cpu_to_le32(wqe_idx);
2383
queue_err:
2384
bnxt_qplib_swq_mod_start(rq, wqe_idx);
2385
bnxt_qplib_hwq_incr_prod(&rq->dbinfo, &rq->hwq, swq->slots);
2386
done:
2387
if (sch_handler) {
2388
nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2389
if (nq_work) {
2390
nq_work->cq = qp->rcq;
2391
nq_work->nq = qp->rcq->nq;
2392
INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2393
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2394
} else {
2395
dev_err(&rq->hwq.pdev->dev,
2396
"QPLIB: FP: Failed to allocate RQ nq_work!\n");
2397
rc = -ENOMEM;
2398
}
2399
}
2400
return rc;
2401
}
2402
2403
/* CQ */
2404
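/* Allocate the CQ hardware queue, issue CREATE_CQ to the firmware, set up
 * the doorbell info and the CQ reference table entry, and enable
 * notification arming (ARMENA) on the new CQ.
 */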
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2405
{
2406
struct bnxt_qplib_hwq_attr hwq_attr = {};
2407
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2408
struct creq_create_cq_resp resp = {};
2409
struct bnxt_qplib_cmdqmsg msg = {};
2410
struct cmdq_create_cq req = {};
2411
struct bnxt_qplib_reftbl *tbl;
2412
unsigned long flag;
2413
u32 pg_sz_lvl = 0;
2414
int rc;
2415
2416
hwq_attr.res = res;
2417
hwq_attr.depth = cq->max_wqe;
2418
hwq_attr.stride = sizeof(struct cq_base);
2419
hwq_attr.type = HWQ_TYPE_QUEUE;
2420
hwq_attr.sginfo = &cq->sginfo;
2421
rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2422
if (rc)
2423
goto exit;
2424
2425
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_CQ,
2426
sizeof(req));
2427
2428
if (!cq->dpi) {
2429
dev_err(&rcfw->pdev->dev,
2430
"QPLIB: FP: CREATE_CQ failed due to NULL DPI\n");
2431
return -EINVAL;
2432
}
2433
req.dpi = cpu_to_le32(cq->dpi->dpi);
2434
req.cq_handle = cpu_to_le64(cq->cq_handle);
2435
2436
req.cq_size = cpu_to_le32(cq->max_wqe);
2437
req.pbl = cpu_to_le64(_get_base_addr(&cq->hwq));
2438
pg_sz_lvl = _get_base_pg_size(&cq->hwq) << CMDQ_CREATE_CQ_PG_SIZE_SFT;
2439
pg_sz_lvl |= ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
2440
CMDQ_CREATE_CQ_LVL_SFT);
2441
req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2442
2443
req.cq_fco_cnq_id = cpu_to_le32(
2444
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2445
CMDQ_CREATE_CQ_CNQ_ID_SFT);
2446
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2447
sizeof(resp), 0);
2448
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2449
if (rc)
2450
goto fail;
2451
cq->id = le32_to_cpu(resp.xid);
2452
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2453
init_waitqueue_head(&cq->waitq);
2454
INIT_LIST_HEAD(&cq->sqf_head);
2455
INIT_LIST_HEAD(&cq->rqf_head);
2456
spin_lock_init(&cq->flush_lock);
2457
spin_lock_init(&cq->compl_lock);
2458
2459
/* init dbinfo */
2460
cq->cctx = res->cctx;
2461
cq->dbinfo.hwq = &cq->hwq;
2462
cq->dbinfo.xid = cq->id;
2463
cq->dbinfo.db = cq->dpi->dbr;
2464
cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2465
cq->dbinfo.flags = 0;
2466
cq->dbinfo.toggle = 0;
2467
cq->dbinfo.res = res;
2468
cq->dbinfo.seed = cq->id;
2469
spin_lock_init(&cq->dbinfo.lock);
2470
cq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
2471
cq->dbinfo.shadow_key_arm_ena = BNXT_QPLIB_DBR_KEY_INVALID;
2472
2473
tbl = &res->reftbl.cqref;
2474
spin_lock_irqsave(&tbl->lock, flag);
2475
tbl->rec[GET_TBL_INDEX(cq->id, tbl)].xid = cq->id;
2476
tbl->rec[GET_TBL_INDEX(cq->id, tbl)].handle = cq;
2477
spin_unlock_irqrestore(&tbl->lock, flag);
2478
2479
bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2480
return 0;
2481
2482
fail:
2483
bnxt_qplib_free_hwq(res, &cq->hwq);
2484
exit:
2485
return rc;
2486
}
2487
2488
int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2489
{
2490
/* TODO: Modify-CQ thresholds are passed to the HW via DBR */
2491
return 0;
2492
}
2493
2494
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2495
struct bnxt_qplib_cq *cq)
2496
{
2497
bnxt_qplib_free_hwq(res, &cq->hwq);
2498
memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2499
/* Reset only the cons bit in the flags */
2500
cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2501
2502
/* Tell HW to switch over to the new CQ */
2503
if (!cq->resize_hwq.is_user)
2504
bnxt_qplib_cq_coffack_db(&cq->dbinfo);
2505
}
2506
2507
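/* Resize a CQ: allocate the new queue in cq->resize_hwq, issue RESIZE_CQ,
 * and for kernel CQs wait until the cut-off CQE clears
 * CQ_FLAGS_RESIZE_IN_PROG (see bnxt_qplib_cq_process_cutoff()) before
 * switching over to the new queue.
 */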
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2508
int new_cqes)
2509
{
2510
struct bnxt_qplib_hwq_attr hwq_attr = {};
2511
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2512
struct creq_resize_cq_resp resp = {};
2513
struct bnxt_qplib_cmdqmsg msg = {};
2514
struct cmdq_resize_cq req = {};
2515
u32 pgsz = 0, lvl = 0, nsz = 0;
2516
struct bnxt_qplib_pbl *pbl;
2517
u16 count = -1;
2518
int rc;
2519
2520
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_RESIZE_CQ,
2521
sizeof(req));
2522
2523
hwq_attr.sginfo = &cq->sginfo;
2524
hwq_attr.res = res;
2525
hwq_attr.depth = new_cqes;
2526
hwq_attr.stride = sizeof(struct cq_base);
2527
hwq_attr.type = HWQ_TYPE_QUEUE;
2528
rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2529
if (rc)
2530
return rc;
2531
2532
dev_dbg(&rcfw->pdev->dev, "QPLIB: FP: %s: pbl_lvl: %d\n", __func__,
2533
cq->resize_hwq.level);
2534
req.cq_cid = cpu_to_le32(cq->id);
2535
pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2536
pgsz = ((pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_4K :
2537
pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_8K :
2538
pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_64K :
2539
pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_RESIZE_CQ_PG_SIZE_PG_2M :
2540
pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_RESIZE_CQ_PG_SIZE_PG_8M :
2541
pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_RESIZE_CQ_PG_SIZE_PG_1G :
2542
CMDQ_RESIZE_CQ_PG_SIZE_PG_4K) & CMDQ_RESIZE_CQ_PG_SIZE_MASK);
2543
lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2544
CMDQ_RESIZE_CQ_LVL_MASK;
2545
nsz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2546
CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2547
req.new_cq_size_pg_size_lvl = cpu_to_le32(nsz|pgsz|lvl);
2548
req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2549
2550
if (!cq->resize_hwq.is_user)
2551
set_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2552
2553
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2554
sizeof(resp), 0);
2555
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2556
if (rc)
2557
goto fail;
2558
2559
if (!cq->resize_hwq.is_user) {
2560
wait:
2561
/* Wait here for the HW to switch the CQ over */
2562
if (wait_event_interruptible_timeout(cq->waitq,
2563
!test_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags),
2564
msecs_to_jiffies(CQ_RESIZE_WAIT_TIME_MS)) ==
2565
-ERESTARTSYS && count--)
2566
goto wait;
2567
2568
if (test_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags)) {
2569
dev_err(&rcfw->pdev->dev,
2570
"QPLIB: FP: RESIZE_CQ timed out\n");
2571
rc = -ETIMEDOUT;
2572
goto fail;
2573
}
2574
2575
bnxt_qplib_resize_cq_complete(res, cq);
2576
}
2577
2578
return 0;
2579
fail:
2580
if (!cq->resize_hwq.is_user) {
2581
bnxt_qplib_free_hwq(res, &cq->resize_hwq);
2582
clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2583
}
2584
return rc;
2585
}
2586
2587
void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2588
{
2589
bnxt_qplib_free_hwq(res, &cq->hwq);
2590
}
2591
2592
static void bnxt_qplib_sync_cq(struct bnxt_qplib_cq *cq)
2593
{
2594
struct bnxt_qplib_nq *nq = cq->nq;
2595
/* Flush any pending work and synchronize irq */
2596
flush_workqueue(cq->nq->cqn_wq);
2597
mutex_lock(&nq->lock);
2598
if (nq->requested)
2599
synchronize_irq(nq->msix_vec);
2600
mutex_unlock(&nq->lock);
2601
}
2602
2603
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2604
{
2605
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2606
struct creq_destroy_cq_resp resp = {};
2607
struct bnxt_qplib_cmdqmsg msg = {};
2608
struct cmdq_destroy_cq req = {};
2609
struct bnxt_qplib_reftbl *tbl;
2610
u16 total_cnq_events;
2611
unsigned long flag;
2612
int rc;
2613
2614
tbl = &res->reftbl.cqref;
2615
spin_lock_irqsave(&tbl->lock, flag);
2616
tbl->rec[GET_TBL_INDEX(cq->id, tbl)].handle = NULL;
2617
tbl->rec[GET_TBL_INDEX(cq->id, tbl)].xid = 0;
2618
spin_unlock_irqrestore(&tbl->lock, flag);
2619
2620
bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_CQ,
2621
sizeof(req));
2622
2623
req.cq_cid = cpu_to_le32(cq->id);
2624
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2625
sizeof(resp), 0);
2626
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2627
if (rc)
2628
return rc;
2629
2630
total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2631
if (total_cnq_events >= 0)
2632
dev_dbg(&rcfw->pdev->dev,
2633
"%s: cq_id = 0x%x cq = 0x%p resp.total_cnq_events = 0x%x\n",
2634
__func__, cq->id, cq, total_cnq_events);
2635
__wait_for_all_nqes(cq, total_cnq_events);
2636
bnxt_qplib_sync_cq(cq);
2637
bnxt_qplib_free_hwq(res, &cq->hwq);
2638
return 0;
2639
}
2640
2641
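/* Fabricate FLUSHED_ERR completions for every outstanding SQ WQE, skipping
 * fence WQEs, until the budget runs out (-EAGAIN) or the queue is drained.
 */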
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2642
struct bnxt_qplib_cqe **pcqe, int *budget)
2643
{
2644
struct bnxt_qplib_cqe *cqe;
2645
u32 start, last;
2646
int rc = 0;
2647
2648
/* Now complete all outstanding SQEs with FLUSHED_ERR */
2649
start = sq->swq_start;
2650
cqe = *pcqe;
2651
while (*budget) {
2652
last = sq->swq_last;
2653
if (start == last) {
2654
break;
2655
}
2656
/* Skip the FENCE WQE completions */
2657
if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2658
bnxt_re_legacy_cancel_phantom_processing(qp);
2659
goto skip_compl;
2660
}
2661
2662
memset(cqe, 0, sizeof(*cqe));
2663
cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2664
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2665
cqe->qp_handle = (u64)qp;
2666
cqe->wr_id = sq->swq[last].wr_id;
2667
cqe->src_qp = qp->id;
2668
cqe->type = sq->swq[last].type;
2669
dev_dbg(&sq->hwq.pdev->dev,
2670
"QPLIB: FP: CQ Processed terminal Req \n");
2671
dev_dbg(&sq->hwq.pdev->dev,
2672
"QPLIB: wr_id[%d] = 0x%llx with status 0x%x\n",
2673
last, cqe->wr_id, cqe->status);
2674
cqe++;
2675
(*budget)--;
2676
skip_compl:
2677
bnxt_qplib_hwq_incr_cons(sq->hwq.depth,
2678
&sq->hwq.cons,
2679
sq->swq[last].slots,
2680
&sq->dbinfo.flags);
2681
sq->swq_last = sq->swq[last].next_idx;
2682
}
2683
*pcqe = cqe;
2684
if (!*budget && sq->swq_last != start)
2685
/* Out of budget */
2686
rc = -EAGAIN;
2687
dev_dbg(&sq->hwq.pdev->dev, "QPLIB: FP: Flush SQ rc = 0x%x\n", rc);
2688
2689
return rc;
2690
}
2691
2692
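/* Fabricate FLUSHED_ERR completions for every outstanding RQ WQE, using
 * the RES opcode that matches the QP type, until the budget runs out
 * (-EAGAIN) or the queue is drained.
 */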
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2693
struct bnxt_qplib_cqe **pcqe, int *budget)
2694
{
2695
struct bnxt_qplib_cqe *cqe;
2696
u32 start, last;
2697
int opcode = 0;
2698
int rc = 0;
2699
2700
switch (qp->type) {
2701
case CMDQ_CREATE_QP1_TYPE_GSI:
2702
opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2703
break;
2704
case CMDQ_CREATE_QP_TYPE_RC:
2705
opcode = CQ_BASE_CQE_TYPE_RES_RC;
2706
break;
2707
case CMDQ_CREATE_QP_TYPE_UD:
2708
opcode = CQ_BASE_CQE_TYPE_RES_UD;
2709
break;
2710
}
2711
2712
/* Flush the rest of the RQ */
2713
start = rq->swq_start;
2714
cqe = *pcqe;
2715
while (*budget) {
2716
last = rq->swq_last;
2717
if (last == start)
2718
break;
2719
memset(cqe, 0, sizeof(*cqe));
2720
cqe->status =
2721
CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2722
cqe->opcode = opcode;
2723
cqe->qp_handle = (u64)qp;
2724
cqe->wr_id = rq->swq[last].wr_id;
2725
dev_dbg(&rq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Res RC \n");
2726
dev_dbg(&rq->hwq.pdev->dev,
2727
"QPLIB: rq[%d] = 0x%llx with status 0x%x\n",
2728
last, cqe->wr_id, cqe->status);
2729
cqe++;
2730
(*budget)--;
2731
bnxt_qplib_hwq_incr_cons(rq->hwq.depth,
2732
&rq->hwq.cons,
2733
rq->swq[last].slots,
2734
&rq->dbinfo.flags);
2735
rq->swq_last = rq->swq[last].next_idx;
2736
}
2737
*pcqe = cqe;
2738
if (!*budget && rq->swq_last != start)
2739
/* Out of budget */
2740
rc = -EAGAIN;
2741
2742
dev_dbg(&rq->hwq.pdev->dev, "QPLIB: FP: Flush RQ rc = 0x%x\n", rc);
2743
return rc;
2744
}
2745
2746
void bnxt_qplib_mark_qp_error(void *qp_handle)
2747
{
2748
struct bnxt_qplib_qp *qp = qp_handle;
2749
2750
if (!qp)
2751
return;
2752
2753
/* Must block new posting of SQ and RQ */
2754
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2755
qp->state = qp->cur_qp_state;
2756
2757
/* Add qp to flush list of the CQ */
2758
if (!qp->is_user)
2759
bnxt_qplib_add_flush_qp(qp);
2760
}
2761
2762
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2763
* CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
2764
*/
2765
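/* WA 9060 (legacy chips): if the PSN search entry for the WQE is marked,
 * arm the CQ and defer completion (-EAGAIN) until the phantom CQE shows
 * up; once sq->condition is set, peek ahead in the CQ for the matching
 * fence completion or a terminal CQE for this QP before resuming normal
 * completion processing.
 */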
static int bnxt_re_legacy_do_wa9060(struct bnxt_qplib_qp *qp,
2766
struct bnxt_qplib_cq *cq,
2767
u32 cq_cons, u32 swq_last,
2768
u32 cqe_sq_cons)
2769
{
2770
struct bnxt_qplib_q *sq = &qp->sq;
2771
struct bnxt_qplib_swq *swq;
2772
u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2773
struct cq_terminal *peek_term_hwcqe;
2774
struct cq_req *peek_req_hwcqe;
2775
struct bnxt_qplib_qp *peek_qp;
2776
struct bnxt_qplib_q *peek_sq;
2777
struct cq_base *peek_hwcqe;
2778
int i, rc = 0;
2779
2780
/* Check for the psn_search marking before completing */
2781
swq = &sq->swq[swq_last];
2782
if (swq->psn_search &&
2783
le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2784
/* Unmark */
2785
swq->psn_search->flags_next_psn = cpu_to_le32
2786
(le32_to_cpu(swq->psn_search->flags_next_psn)
2787
& ~0x80000000);
2788
dev_dbg(&cq->hwq.pdev->dev,
2789
"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2790
cq_cons, qp->id, swq_last, cqe_sq_cons);
2791
sq->condition = true;
2792
sq->legacy_send_phantom = true;
2793
2794
/* TODO: Only ARM if the previous SQE is ARMALL */
2795
bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2796
2797
rc = -EAGAIN;
2798
goto out;
2799
}
2800
if (sq->condition == true) {
2801
/* Peek at the completions */
2802
peek_flags = cq->dbinfo.flags;
2803
peek_sw_cq_cons = cq_cons;
2804
i = cq->hwq.depth;
2805
while (i--) {
2806
peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2807
peek_sw_cq_cons, NULL);
2808
/* If the next hwcqe is VALID */
2809
if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2810
/* If the next hwcqe is a REQ */
2811
dma_rmb();
2812
switch (peek_hwcqe->cqe_type_toggle &
2813
CQ_BASE_CQE_TYPE_MASK) {
2814
case CQ_BASE_CQE_TYPE_REQ:
2815
peek_req_hwcqe = (struct cq_req *)
2816
peek_hwcqe;
2817
peek_qp = (struct bnxt_qplib_qp *)
2818
le64_to_cpu(
2819
peek_req_hwcqe->qp_handle);
2820
peek_sq = &peek_qp->sq;
2821
peek_sq_cons_idx =
2822
((le16_to_cpu(
2823
peek_req_hwcqe->sq_cons_idx)
2824
- 1) % sq->max_wqe);
2825
/* If the hwcqe's sq's wr_id matches */
2826
if (peek_sq == sq &&
2827
sq->swq[peek_sq_cons_idx].wr_id ==
2828
BNXT_QPLIB_FENCE_WRID) {
2829
/* Unbreak only if the phantom
2830
comes back */
2831
dev_dbg(&cq->hwq.pdev->dev,
2832
"FP: Process Req qp=0x%x current sq cons sw=0x%x cqe=0x%x\n",
2833
qp->id, swq_last,
2834
cqe_sq_cons);
2835
sq->condition = false;
2836
sq->single = true;
2837
sq->phantom_cqe_cnt++;
2838
dev_dbg(&cq->hwq.pdev->dev,
2839
"qp %#x condition restored at peek cq_cons=%#x sq_cons_idx %#x, phantom_cqe_cnt: %d unmark\n",
2840
peek_qp->id,
2841
peek_sw_cq_cons,
2842
peek_sq_cons_idx,
2843
sq->phantom_cqe_cnt);
2844
rc = 0;
2845
goto out;
2846
}
2847
break;
2848
2849
case CQ_BASE_CQE_TYPE_TERMINAL:
2850
/* In case the QP has gone into the
2851
error state */
2852
peek_term_hwcqe = (struct cq_terminal *)
2853
peek_hwcqe;
2854
peek_qp = (struct bnxt_qplib_qp *)
2855
le64_to_cpu(
2856
peek_term_hwcqe->qp_handle);
2857
if (peek_qp == qp) {
2858
sq->condition = false;
2859
rc = 0;
2860
goto out;
2861
}
2862
break;
2863
default:
2864
break;
2865
}
2866
/* Valid but not the phantom, so keep looping */
2867
} else {
2868
/* Not valid yet, just exit and wait */
2869
rc = -EINVAL;
2870
goto out;
2871
}
2872
bnxt_qplib_hwq_incr_cons(cq->hwq.depth,
2873
&peek_sw_cq_cons,
2874
1, &peek_flags);
2875
}
2876
dev_err(&cq->hwq.pdev->dev,
2877
"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2878
cq_cons, qp->id, swq_last, cqe_sq_cons);
2879
rc = -EINVAL;
2880
}
2881
out:
2882
return rc;
2883
}
2884
2885
static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
2886
{
2887
struct bnxt_qplib_hwq *sq_hwq;
2888
struct bnxt_qplib_swq *swq;
2889
int cqe_sq_cons = -1;
2890
u32 start, last;
2891
2892
sq_hwq = &sq->hwq;
2893
2894
start = sq->swq_start;
2895
last = sq->swq_last;
2896
2897
while (last != start) {
2898
swq = &sq->swq[last];
2899
if (swq->slot_idx == cqe_slot) {
2900
cqe_sq_cons = swq->next_idx;
2901
dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
2902
__func__, cqe_sq_cons, cqe_slot);
2903
break;
2904
}
2905
2906
last = swq->next_idx;
2907
}
2908
return cqe_sq_cons;
2909
}
2910
2911
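/* Process a REQ CQE. Because the HW coalesces completions, walk the SQ swq
 * from the current consumer up to the reported sq_cons_idx and fabricate a
 * CQE for every signaled WQE on the way; an error status is applied only
 * to the last one and moves the QP to the error state.
 */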
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2912
struct cq_req *hwcqe,
2913
struct bnxt_qplib_cqe **pcqe, int *budget,
2914
u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2915
{
2916
struct bnxt_qplib_qp *qp;
2917
struct bnxt_qplib_q *sq;
2918
struct bnxt_qplib_cqe *cqe;
2919
u32 cqe_sq_cons, slot_num;
2920
struct bnxt_qplib_swq *swq;
2921
int cqe_cons;
2922
int rc = 0;
2923
2924
qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
2925
dev_dbg(&cq->hwq.pdev->dev, "FP: Process Req qp=0x%p\n", qp);
2926
if (!qp) {
2927
dev_err(&cq->hwq.pdev->dev,
2928
"QPLIB: FP: Process Req qp is NULL\n");
2929
return -EINVAL;
2930
}
2931
sq = &qp->sq;
2932
2933
cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2934
if (qp->sq.flushed) {
2935
dev_dbg(&cq->hwq.pdev->dev,
2936
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2937
goto done;
2938
}
2939
2940
if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
2941
slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
2942
cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
2943
if (cqe_cons < 0) {
2944
dev_dbg(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
2945
__func__, slot_num);
2946
goto done;
2947
}
2948
cqe_sq_cons = cqe_cons;
2949
dev_dbg(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
2950
__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
2951
}
2952
2953
/* We need to walk the sq's swq to fabricate CQEs for all previously
2954
* signaled SWQEs due to CQE aggregation from the current sq cons
2955
* to the cqe_sq_cons
2956
*/
2957
cqe = *pcqe;
2958
while (*budget) {
2959
if (sq->swq_last == cqe_sq_cons)
2960
/* Done */
2961
break;
2962
2963
swq = &sq->swq[sq->swq_last];
2964
memset(cqe, 0, sizeof(*cqe));
2965
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2966
cqe->qp_handle = (u64)qp;
2967
cqe->src_qp = qp->id;
2968
cqe->wr_id = swq->wr_id;
2969
2970
if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2971
goto skip;
2972
2973
cqe->type = swq->type;
2974
2975
/* For the last CQE, check for status. For errors, regardless
2976
* of the request being signaled or not, it must complete with
2977
* the hwcqe error status
2978
*/
2979
if (swq->next_idx == cqe_sq_cons &&
2980
hwcqe->status != CQ_REQ_STATUS_OK) {
2981
cqe->status = hwcqe->status;
2982
dev_err(&cq->hwq.pdev->dev,
2983
"QPLIB: FP: CQ Processed Req \n");
2984
dev_err(&cq->hwq.pdev->dev,
2985
"QPLIB: QP 0x%x wr_id[%d] = 0x%lx vendor type 0x%x with vendor status 0x%x\n",
2986
cqe->src_qp, sq->swq_last, cqe->wr_id, cqe->type, cqe->status);
2987
cqe++;
2988
(*budget)--;
2989
bnxt_qplib_mark_qp_error(qp);
2990
} else {
2991
/* Before we complete, do WA 9060 */
2992
if (!_is_chip_gen_p5_p7(qp->cctx)) {
2993
if (bnxt_re_legacy_do_wa9060(qp, cq, cq_cons,
2994
sq->swq_last,
2995
cqe_sq_cons)) {
2996
*lib_qp = qp;
2997
goto out;
2998
}
2999
}
3000
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
3001
3002
dev_dbg(&cq->hwq.pdev->dev,
3003
"QPLIB: FP: CQ Processed Req \n");
3004
dev_dbg(&cq->hwq.pdev->dev,
3005
"QPLIB: wr_id[%d] = 0x%llx \n",
3006
sq->swq_last, cqe->wr_id);
3007
dev_dbg(&cq->hwq.pdev->dev,
3008
"QPLIB: with status 0x%x\n", cqe->status);
3009
cqe->status = CQ_REQ_STATUS_OK;
3010
cqe++;
3011
(*budget)--;
3012
}
3013
}
3014
skip:
3015
bnxt_qplib_hwq_incr_cons(sq->hwq.depth, &sq->hwq.cons,
3016
swq->slots, &sq->dbinfo.flags);
3017
sq->swq_last = swq->next_idx;
3018
if (sq->single == true)
3019
break;
3020
}
3021
out:
3022
*pcqe = cqe;
3023
if (sq->swq_last != cqe_sq_cons) {
3024
/* Out of budget */
3025
rc = -EAGAIN;
3026
goto done;
3027
}
3028
/* Back to normal completion mode only after it has completed all of
3029
the WC for this CQE */
3030
sq->single = false;
3031
done:
3032
return rc;
3033
}
3034
3035
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
3036
{
3037
spin_lock(&srq->hwq.lock);
3038
srq->swq[srq->last_idx].next_idx = (int)tag;
3039
srq->last_idx = (int)tag;
3040
srq->swq[srq->last_idx].next_idx = -1;
3041
bnxt_qplib_hwq_incr_cons(srq->hwq.depth, &srq->hwq.cons,
3042
srq->dbinfo.max_slot, &srq->dbinfo.flags);
3043
spin_unlock(&srq->hwq.lock);
3044
}
3045
3046
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
3047
struct cq_res_rc *hwcqe,
3048
struct bnxt_qplib_cqe **pcqe,
3049
int *budget)
3050
{
3051
struct bnxt_qplib_srq *srq;
3052
struct bnxt_qplib_cqe *cqe;
3053
struct bnxt_qplib_qp *qp;
3054
struct bnxt_qplib_q *rq;
3055
u32 wr_id_idx;
3056
int rc = 0;
3057
3058
qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
3059
if (!qp) {
3060
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL\n");
3061
return -EINVAL;
3062
}
3063
if (qp->rq.flushed) {
3064
dev_dbg(&cq->hwq.pdev->dev,
3065
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
3066
goto done;
3067
}
3068
3069
cqe = *pcqe;
3070
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3071
cqe->length = le32_to_cpu(hwcqe->length);
3072
cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
3073
cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
3074
cqe->flags = le16_to_cpu(hwcqe->flags);
3075
cqe->status = hwcqe->status;
3076
cqe->qp_handle = (u64)(unsigned long)qp;
3077
3078
wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
3079
CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
3080
if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
3081
srq = qp->srq;
3082
if (!srq) {
3083
dev_err(&cq->hwq.pdev->dev,
3084
"QPLIB: FP: SRQ used but not defined??\n");
3085
return -EINVAL;
3086
}
3087
if (wr_id_idx > srq->hwq.depth - 1) {
3088
dev_err(&cq->hwq.pdev->dev,
3089
"QPLIB: FP: CQ Process RC \n");
3090
dev_err(&cq->hwq.pdev->dev,
3091
"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x\n",
3092
wr_id_idx, srq->hwq.depth);
3093
return -EINVAL;
3094
}
3095
cqe->wr_id = srq->swq[wr_id_idx].wr_id;
3096
bnxt_qplib_release_srqe(srq, wr_id_idx);
3097
dev_dbg(&srq->hwq.pdev->dev,
3098
"QPLIB: FP: CQ Processed RC SRQ wr_id[%d] = 0x%llx\n",
3099
wr_id_idx, cqe->wr_id);
3100
cqe++;
3101
(*budget)--;
3102
*pcqe = cqe;
3103
} else {
3104
rq = &qp->rq;
3105
if (wr_id_idx > (rq->max_wqe - 1)) {
3106
dev_err(&cq->hwq.pdev->dev,
3107
"QPLIB: FP: CQ Process RC \n");
3108
dev_err(&cq->hwq.pdev->dev,
3109
"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x\n",
3110
wr_id_idx, rq->hwq.depth);
3111
return -EINVAL;
3112
}
3113
if (wr_id_idx != rq->swq_last)
3114
return -EINVAL;
3115
cqe->wr_id = rq->swq[rq->swq_last].wr_id;
3116
dev_dbg(&cq->hwq.pdev->dev,
3117
"QPLIB: FP: CQ Processed RC RQ wr_id[%d] = 0x%llx\n",
3118
rq->swq_last, cqe->wr_id);
3119
cqe++;
3120
(*budget)--;
3121
bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons,
3122
rq->swq[rq->swq_last].slots,
3123
&rq->dbinfo.flags);
3124
rq->swq_last = rq->swq[rq->swq_last].next_idx;
3125
*pcqe = cqe;
3126
3127
if (hwcqe->status != CQ_RES_RC_STATUS_OK)
3128
bnxt_qplib_mark_qp_error(qp);
3129
}
3130
done:
3131
return rc;
3132
}
3133
3134
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
3135
struct cq_res_ud_v2 *hwcqe,
3136
struct bnxt_qplib_cqe **pcqe,
3137
int *budget)
3138
{
3139
struct bnxt_qplib_srq *srq;
3140
struct bnxt_qplib_cqe *cqe;
3141
struct bnxt_qplib_qp *qp;
3142
struct bnxt_qplib_q *rq;
3143
u32 wr_id_idx;
3144
int rc = 0;
3145
u16 *smac;
3146
3147
qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
3148
if (!qp) {
3149
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL\n");
3150
return -EINVAL;
3151
}
3152
if (qp->rq.flushed) {
3153
dev_dbg(&cq->hwq.pdev->dev,
3154
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
3155
goto done;
3156
}
3157
cqe = *pcqe;
3158
cqe->opcode = hwcqe->cqe_type_toggle & CQ_RES_UD_V2_CQE_TYPE_MASK;
3159
cqe->length = le32_to_cpu((hwcqe->length & CQ_RES_UD_V2_LENGTH_MASK));
3160
cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata0);
3161
/* V2 format has metadata1 */
3162
cqe->cfa_meta |= (((le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) &
3163
CQ_RES_UD_V2_CFA_METADATA1_MASK) >>
3164
CQ_RES_UD_V2_CFA_METADATA1_SFT) <<
3165
BNXT_QPLIB_META1_SHIFT);
3166
cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
3167
cqe->flags = le16_to_cpu(hwcqe->flags);
3168
cqe->status = hwcqe->status;
3169
cqe->qp_handle = (u64)(unsigned long)qp;
3170
smac = (u16 *)cqe->smac;
3171
smac[2] = ntohs(le16_to_cpu(hwcqe->src_mac[0]));
3172
smac[1] = ntohs(le16_to_cpu(hwcqe->src_mac[1]));
3173
smac[0] = ntohs(le16_to_cpu(hwcqe->src_mac[2]));
3174
wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
3175
& CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK;
3176
cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
3177
((le32_to_cpu(
3178
hwcqe->src_qp_high_srq_or_rq_wr_id) &
3179
CQ_RES_UD_V2_SRC_QP_HIGH_MASK) >> 8);
3180
3181
if (cqe->flags & CQ_RES_UD_V2_FLAGS_SRQ) {
3182
srq = qp->srq;
3183
if (!srq) {
3184
dev_err(&cq->hwq.pdev->dev,
3185
"QPLIB: FP: SRQ used but not defined??\n");
3186
return -EINVAL;
3187
}
3188
if (wr_id_idx > srq->hwq.depth - 1) {
3189
dev_err(&cq->hwq.pdev->dev,
3190
"QPLIB: FP: CQ Process UD \n");
3191
dev_err(&cq->hwq.pdev->dev,
3192
"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x\n",
3193
wr_id_idx, srq->hwq.depth);
3194
return -EINVAL;
3195
}
3196
cqe->wr_id = srq->swq[wr_id_idx].wr_id;
3197
bnxt_qplib_release_srqe(srq, wr_id_idx);
3198
dev_dbg(&cq->hwq.pdev->dev,
3199
"QPLIB: FP: CQ Processed UD SRQ wr_id[%d] = 0x%llx\n",
3200
wr_id_idx, cqe->wr_id);
3201
cqe++;
3202
(*budget)--;
3203
*pcqe = cqe;
3204
} else {
3205
rq = &qp->rq;
3206
if (wr_id_idx > (rq->max_wqe - 1)) {
3207
dev_err(&cq->hwq.pdev->dev,
3208
"QPLIB: FP: CQ Process UD \n");
3209
dev_err(&cq->hwq.pdev->dev,
3210
"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x\n",
3211
wr_id_idx, rq->hwq.depth);
3212
return -EINVAL;
3213
}
3214
if (rq->swq_last != wr_id_idx)
3215
return -EINVAL;
3216
3217
cqe->wr_id = rq->swq[rq->swq_last].wr_id;
3218
dev_dbg(&cq->hwq.pdev->dev,
3219
"QPLIB: FP: CQ Processed UD RQ wr_id[%d] = 0x%llx\n",
3220
rq->swq_last, cqe->wr_id);
3221
cqe++;
3222
(*budget)--;
3223
bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons,
3224
rq->swq[rq->swq_last].slots,
3225
&rq->dbinfo.flags);
3226
rq->swq_last = rq->swq[rq->swq_last].next_idx;
3227
*pcqe = cqe;
3228
3229
if (hwcqe->status != CQ_RES_UD_V2_STATUS_OK)
3230
bnxt_qplib_mark_qp_error(qp);
3231
}
3232
done:
3233
return rc;
3234
}
3235
3236
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
3237
{
3238
3239
struct cq_base *hw_cqe;
3240
unsigned long flags;
3241
bool rc = true;
3242
3243
spin_lock_irqsave(&cq->hwq.lock, flags);
3244
hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3245
3246
/* Check for Valid bit. If the CQE is valid, return false */
3247
rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
3248
spin_unlock_irqrestore(&cq->hwq.lock, flags);
3249
return rc;
3250
}
3251
3252
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
3253
struct cq_res_raweth_qp1 *hwcqe,
3254
struct bnxt_qplib_cqe **pcqe,
3255
int *budget)
3256
{
3257
struct bnxt_qplib_qp *qp;
3258
struct bnxt_qplib_q *rq;
3259
struct bnxt_qplib_srq *srq;
3260
struct bnxt_qplib_cqe *cqe;
3261
u32 wr_id_idx;
3262
int rc = 0;
3263
3264
qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
3265
if (!qp) {
3266
dev_err(&cq->hwq.pdev->dev,
3267
"QPLIB: process_cq Raw/QP1 qp is NULL\n");
3268
return -EINVAL;
3269
}
3270
if (qp->rq.flushed) {
3271
dev_dbg(&cq->hwq.pdev->dev,
3272
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
3273
goto done;
3274
}
3275
cqe = *pcqe;
3276
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3277
cqe->flags = le16_to_cpu(hwcqe->flags);
3278
cqe->qp_handle = (u64)(unsigned long)qp;
3279
3280
wr_id_idx = le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
3281
& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
3282
cqe->src_qp = qp->id;
3283
if (qp->id == 1 && !cqe->length) {
3284
/* Add workaround for the length misdetection */
3285
cqe->length = 296;
3286
} else {
3287
cqe->length = le16_to_cpu(hwcqe->length);
3288
}
3289
cqe->pkey_index = qp->pkey_index;
3290
memcpy(cqe->smac, qp->smac, 6);
3291
3292
cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
3293
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
3294
cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
3295
3296
dev_dbg(&cq->hwq.pdev->dev,
3297
"QPLIB: raweth_qp1_flags = 0x%x raweth_qp1_flags2 = 0x%x\n",
3298
cqe->raweth_qp1_flags, cqe->raweth_qp1_flags2);
3299
3300
if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
3301
srq = qp->srq;
3302
if (!srq) {
3303
dev_err(&cq->hwq.pdev->dev,
3304
"QPLIB: FP: SRQ used but not defined??\n");
3305
return -EINVAL;
3306
}
3307
if (wr_id_idx > srq->hwq.depth - 1) {
3308
dev_err(&cq->hwq.pdev->dev,
3309
"QPLIB: FP: CQ Process Raw/QP1 \n");
3310
dev_err(&cq->hwq.pdev->dev,
3311
"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x\n",
3312
wr_id_idx, srq->hwq.depth);
3313
return -EINVAL;
3314
}
3315
cqe->wr_id = srq->swq[wr_id_idx].wr_id;
3316
dev_dbg(&cq->hwq.pdev->dev,
3317
"QPLIB: FP: CQ Processed Raw/QP1 SRQ \n");
3318
dev_dbg(&cq->hwq.pdev->dev,
3319
"QPLIB: wr_id[%d] = 0x%llx with status = 0x%x\n",
3320
wr_id_idx, cqe->wr_id, hwcqe->status);
3321
cqe++;
3322
(*budget)--;
3323
srq->hwq.cons++;
3324
*pcqe = cqe;
3325
} else {
3326
rq = &qp->rq;
3327
if (wr_id_idx > (rq->max_wqe - 1)) {
3328
dev_err(&cq->hwq.pdev->dev,
3329
"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id \n");
3330
dev_err(&cq->hwq.pdev->dev,
3331
"QPLIB: ix 0x%x exceeded RQ max 0x%x\n",
3332
wr_id_idx, rq->max_wqe);
3333
return -EINVAL;
3334
}
3335
if (wr_id_idx != rq->swq_last)
3336
return -EINVAL;
3337
cqe->wr_id = rq->swq[rq->swq_last].wr_id;
3338
dev_dbg(&cq->hwq.pdev->dev,
3339
"QPLIB: FP: CQ Processed Raw/QP1 RQ \n");
3340
dev_dbg(&cq->hwq.pdev->dev,
3341
"QPLIB: wr_id[%d] = 0x%llx with status = 0x%x\n",
3342
wr_id_idx, cqe->wr_id, hwcqe->status);
3343
cqe++;
3344
(*budget)--;
3345
bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons,
3346
rq->swq[wr_id_idx].slots,
3347
&rq->dbinfo.flags);
3348
rq->swq_last = rq->swq[rq->swq_last].next_idx;
3349
*pcqe = cqe;
3350
3351
if (hwcqe->status != CQ_RES_RC_STATUS_OK)
3352
bnxt_qplib_mark_qp_error(qp);
3353
}
3354
done:
3355
return rc;
3356
}
3357
3358
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
3359
struct cq_terminal *hwcqe,
3360
struct bnxt_qplib_cqe **pcqe,
3361
int *budget)
3362
{
3363
struct bnxt_qplib_q *sq, *rq;
3364
struct bnxt_qplib_cqe *cqe;
3365
struct bnxt_qplib_qp *qp;
3366
u32 swq_last;
3367
u32 cqe_cons;
3368
int rc = 0;
3369
3370
/* Check the Status */
3371
if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
3372
dev_warn(&cq->hwq.pdev->dev,
3373
"QPLIB: FP: CQ Process Terminal Error status = 0x%x\n",
3374
hwcqe->status);
3375
3376
qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
3377
if (!qp)
3378
return -EINVAL;
3379
dev_dbg(&cq->hwq.pdev->dev,
3380
"QPLIB: FP: CQ Process terminal for qp (0x%x)\n", qp->id);
3381
3382
/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
3383
* from the current rq->cons to the rq->prod regardless of what the
3384
* rq->cons the terminal CQE indicates.
3385
*/
3386
bnxt_qplib_mark_qp_error(qp);
3387
3388
sq = &qp->sq;
3389
rq = &qp->rq;
3390
3391
cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
3392
if (cqe_cons == 0xFFFF)
3393
goto do_rq;
3394
3395
cqe_cons %= sq->max_sw_wqe;
3396
if (qp->sq.flushed) {
3397
dev_dbg(&cq->hwq.pdev->dev,
3398
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
3399
goto sq_done;
3400
}
3401
3402
/* Terminal CQE can also include aggregated successful CQEs prior to it.
3403
So we must complete all CQEs from the current sq's cons to the
3404
cq_cons with status OK */
3405
cqe = *pcqe;
3406
while (*budget) {
3407
/*sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);*/
3408
swq_last = sq->swq_last;
3409
if (swq_last == cqe_cons)
3410
break;
3411
if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
3412
memset(cqe, 0, sizeof(*cqe));
3413
cqe->status = CQ_REQ_STATUS_OK;
3414
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
3415
cqe->qp_handle = (u64)qp;
3416
cqe->src_qp = qp->id;
3417
cqe->wr_id = sq->swq[swq_last].wr_id;
3418
cqe->type = sq->swq[swq_last].type;
3419
dev_dbg(&cq->hwq.pdev->dev,
3420
"QPLIB: FP: CQ Processed terminal Req \n");
3421
dev_dbg(&cq->hwq.pdev->dev,
3422
"QPLIB: wr_id[%d] = 0x%llx with status 0x%x\n",
3423
swq_last, cqe->wr_id, cqe->status);
3424
cqe++;
3425
(*budget)--;
3426
}
3427
bnxt_qplib_hwq_incr_cons(sq->hwq.depth, &sq->hwq.cons,
3428
sq->swq[swq_last].slots,
3429
&sq->dbinfo.flags);
3430
sq->swq_last = sq->swq[swq_last].next_idx;
3431
}
3432
*pcqe = cqe;
3433
if (!*budget && swq_last != cqe_cons) {
3434
/* Out of budget */
3435
rc = -EAGAIN;
3436
goto sq_done;
3437
}
3438
sq_done:
3439
if (rc)
3440
return rc;
3441
do_rq:
3442
cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
3443
if (cqe_cons == 0xFFFF) {
3444
goto done;
3445
} else if (cqe_cons > (rq->max_wqe - 1)) {
3446
dev_err(&cq->hwq.pdev->dev,
3447
"QPLIB: FP: CQ Processed terminal \n");
3448
dev_err(&cq->hwq.pdev->dev,
3449
"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x\n",
3450
cqe_cons, rq->hwq.depth);
3451
goto done;
3452
}
3453
if (qp->rq.flushed) {
3454
dev_dbg(&cq->hwq.pdev->dev,
3455
"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
3456
rc = 0;
3457
goto rq_done;
3458
}
3459
3460
rq_done:
3461
done:
3462
return rc;
3463
}
3464
3465
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
3466
struct cq_cutoff *hwcqe)
3467
{
3468
/* Check the Status */
3469
if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
3470
dev_err(&cq->hwq.pdev->dev,
3471
"QPLIB: FP: CQ Process Cutoff Error status = 0x%x\n",
3472
hwcqe->status);
3473
return -EINVAL;
3474
}
3475
clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
3476
wake_up_interruptible(&cq->waitq);
3477
3478
dev_dbg(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Cutoff\n");
3479
return 0;
3480
}
3481
3482
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
3483
struct bnxt_qplib_cqe *cqe,
3484
int num_cqes)
3485
{
3486
struct bnxt_qplib_qp *qp = NULL;
3487
u32 budget = num_cqes;
3488
unsigned long flags;
3489
3490
spin_lock_irqsave(&cq->flush_lock, flags);
3491
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
3492
dev_dbg(&cq->hwq.pdev->dev,
3493
"QPLIB: FP: Flushing SQ QP= %p\n",
3494
qp);
3495
__flush_sq(&qp->sq, qp, &cqe, &budget);
3496
}
3497
3498
list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
3499
dev_dbg(&cq->hwq.pdev->dev,
3500
"QPLIB: FP: Flushing RQ QP= %p\n",
3501
qp);
3502
__flush_rq(&qp->rq, qp, &cqe, &budget);
3503
}
3504
spin_unlock_irqrestore(&cq->flush_lock, flags);
3505
3506
return num_cqes - budget;
3507
}
3508
3509
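/* Main CQ poll loop: consume valid CQEs up to num_cqes, dispatching on the
 * CQE type. Returns the number of qplib CQEs produced; *lib_qp is set when
 * the legacy WA 9060 path wants the caller to come back to that QP.
 */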
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3510
int num_cqes, struct bnxt_qplib_qp **lib_qp)
3511
{
3512
struct cq_base *hw_cqe;
3513
u32 hw_polled = 0;
3514
int budget, rc = 0;
3515
u8 type;
3516
3517
budget = num_cqes;
3518
3519
while (budget) {
3520
hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3521
3522
/* Check for Valid bit */
3523
if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3524
break;
3525
3526
/* The valid test of the entry must be done first before
3527
* reading any further.
3528
*/
3529
dma_rmb();
3530
/* From the device's respective CQE format to qplib_wc */
3531
type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3532
switch (type) {
3533
case CQ_BASE_CQE_TYPE_REQ:
3534
rc = bnxt_qplib_cq_process_req(cq,
3535
(struct cq_req *)hw_cqe, &cqe, &budget,
3536
cq->hwq.cons, lib_qp);
3537
break;
3538
case CQ_BASE_CQE_TYPE_RES_RC:
3539
rc = bnxt_qplib_cq_process_res_rc(cq,
3540
(struct cq_res_rc *)hw_cqe, &cqe,
3541
&budget);
3542
break;
3543
case CQ_BASE_CQE_TYPE_RES_UD:
3544
rc = bnxt_qplib_cq_process_res_ud(cq,
3545
(struct cq_res_ud_v2 *)hw_cqe,
3546
&cqe, &budget);
3547
break;
3548
case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3549
rc = bnxt_qplib_cq_process_res_raweth_qp1(cq,
3550
(struct cq_res_raweth_qp1 *)
3551
hw_cqe, &cqe, &budget);
3552
break;
3553
case CQ_BASE_CQE_TYPE_TERMINAL:
3554
rc = bnxt_qplib_cq_process_terminal(cq,
3555
(struct cq_terminal *)hw_cqe,
3556
&cqe, &budget);
3557
break;
3558
case CQ_BASE_CQE_TYPE_CUT_OFF:
3559
bnxt_qplib_cq_process_cutoff(cq,
3560
(struct cq_cutoff *)hw_cqe);
3561
/* Done processing this CQ */
3562
goto exit;
3563
default:
3564
dev_err(&cq->hwq.pdev->dev,
3565
"QPLIB: process_cq unknown type 0x%x\n",
3566
hw_cqe->cqe_type_toggle &
3567
CQ_BASE_CQE_TYPE_MASK);
3568
rc = -EINVAL;
3569
break;
3570
}
3571
if (rc < 0) {
3572
dev_dbg(&cq->hwq.pdev->dev,
3573
"QPLIB: process_cqe rc = 0x%x\n", rc);
3574
if (rc == -EAGAIN)
3575
break;
3576
/* Error while processing the CQE, just skip to the
3577
next one */
3578
if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3579
dev_err(&cq->hwq.pdev->dev,
3580
"QPLIB: process_cqe error rc = 0x%x\n",
3581
rc);
3582
}
3583
hw_polled++;
3584
bnxt_qplib_hwq_incr_cons(cq->hwq.depth, &cq->hwq.cons,
3585
1, &cq->dbinfo.flags);
3586
}
3587
if (hw_polled)
3588
bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3589
exit:
3590
return num_cqes - budget;
3591
}
3592
3593
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3594
{
3595
cq->dbinfo.toggle = cq->toggle;
3596
if (arm_type)
3597
bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3598
/* Use the cq->arm_state variable to track whether to invoke the cq handler */
3599
atomic_set(&cq->arm_state, 1);
3600
}
3601
3602
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3603
{
3604
flush_workqueue(qp->scq->nq->cqn_wq);
3605
if (qp->scq != qp->rcq)
3606
flush_workqueue(qp->rcq->nq->cqn_wq);
3607
}
3608
3609