Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/bnxt/bnxt_en/bnxt_txrx.c
105671 views
1
/*-
2
* Broadcom NetXtreme-C/E network driver.
3
*
4
* Copyright (c) 2016 Broadcom, All Rights Reserved.
5
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
17
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26
* THE POSSIBILITY OF SUCH DAMAGE.
27
*/
28
29
#include <sys/types.h>
30
#include <sys/socket.h>
31
#include <sys/endian.h>
32
#include <net/if.h>
33
#include <net/if_var.h>
34
#include <net/ethernet.h>
35
#include <net/iflib.h>
36
37
#include "opt_inet.h"
38
#include "opt_inet6.h"
39
#include "opt_rss.h"
40
41
#include "bnxt.h"
42
43
/*
 * Function prototypes
 */

static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);

static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);

/*
 * NOTE(review): the fragment below appears to be a leftover from an older
 * iflib refill interface; these parameters are now carried inside
 * if_rxd_update_t — confirm and consider removing:
 * uint16_t rxqid, uint8_t flid,
 * uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
 * uint16_t buf_size);
 */
static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
    qidx_t pidx);
static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
    qidx_t budget);
static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);

static int bnxt_intr(void *sc);
65
struct if_txrx bnxt_txrx = {
66
.ift_txd_encap = bnxt_isc_txd_encap,
67
.ift_txd_flush = bnxt_isc_txd_flush,
68
.ift_txd_credits_update = bnxt_isc_txd_credits_update,
69
.ift_rxd_available = bnxt_isc_rxd_available,
70
.ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
71
.ift_rxd_refill = bnxt_isc_rxd_refill,
72
.ift_rxd_flush = bnxt_isc_rxd_flush,
73
.ift_legacy_intr = bnxt_intr
74
};
75
76
/*
 * Device Dependent Packet Transmit and Receive Functions
 */

/*
 * TX length-hint table, indexed by packet length divided by 512
 * (bnxt_isc_txd_encap uses pi->ipi_len >> 9 for lengths < 2048;
 * anything >= 2048 gets GTE2K directly).
 */
static const uint16_t bnxt_tx_lhint[] = {
	TX_BD_SHORT_FLAGS_LHINT_LT512,
	TX_BD_SHORT_FLAGS_LHINT_LT1K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_LT2K,
	TX_BD_SHORT_FLAGS_LHINT_GTE2K,
};
88
/*
 * iflib ift_txd_encap: encode the packet described by pi into buffer
 * descriptors on the TX ring for queue pi->ipi_qsidx.
 *
 * The first BD is a long BD; when offloads are in play (or always, per
 * the Cu+B1 workaround below) it is followed by a "hi" BD carrying
 * checksum/TSO/VLAN metadata.  Remaining DMA segments get short BDs.
 * On return pi->ipi_new_pidx is the producer index one past the last BD
 * written; the caller passes it to bnxt_isc_txd_flush().  Always
 * returns 0.
 */
static int
bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
	struct tx_bd_long *tbd;
	struct tx_bd_long_hi *tbdh;
	bool need_hi = false;
	uint16_t flags_type;
	uint16_t lflags;
	uint32_t cfa_meta;
	int seg = 0;
	uint8_t wrap = 0;

	/* If we have offloads enabled, we need to use two BDs. */
	if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
	    pi->ipi_mflags & M_VLANTAG)
		need_hi = true;

	/* TODO: Devices before Cu+B1 need to not mix long and short BDs */
	need_hi = true;

	pi->ipi_new_pidx = pi->ipi_pidx;
	tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
	pi->ipi_ndescs = 0;
	/*
	 * No need to byte-swap the opaque value.  The top byte carries the
	 * total BD count; bnxt_isc_txd_credits_update() reads it back out
	 * of the TX completion (opaque >> 24).
	 */
	tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
	tbd->len = htole16(pi->ipi_segs[seg].ds_len);
	tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
	/* BD count (segments plus the optional hi BD) goes into the flags. */
	flags_type = ((pi->ipi_nsegs + need_hi) <<
	    TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
	/* Length hint for the hardware; table is indexed by len/512. */
	if (pi->ipi_len >= 2048)
		flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
	else
		flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];

	if (need_hi) {
		flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;

		/* Handle wrapping */
		if (pi->ipi_new_pidx == txr->ring_size - 1)
			wrap = 1;

		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);

		/* Toggle epoch bit on wrap */
		if (wrap && pi->ipi_new_pidx == 0)
			txr->epoch_bit = !txr->epoch_bit;
		if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
			txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;

		tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
		tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
		/* Header size is expressed in 16-bit words, hence the >> 1. */
		tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
		    pi->ipi_tcp_hlen) >> 1);
		tbdh->cfa_action = 0;
		lflags = 0;
		cfa_meta = 0;
		if (pi->ipi_mflags & M_VLANTAG) {
			/* TODO: Do we need to byte-swap the vtag here? */
			cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
			    pi->ipi_vtag;
			cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		tbdh->cfa_meta = htole32(cfa_meta);
		if (pi->ipi_csum_flags & CSUM_TSO) {
			lflags |= TX_BD_LONG_LFLAGS_LSO |
			    TX_BD_LONG_LFLAGS_T_IPID;
		}
		else {
			/* Non-TSO: request the individual checksum offloads. */
			if (pi->ipi_csum_flags & CSUM_IP) {
				lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			}
			switch (pi->ipi_ipproto) {
			case IPPROTO_TCP:
				if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
					lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
				}
				break;
			case IPPROTO_UDP:
				if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
					lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
				}
				break;
			}
		}
		tbdh->lflags = htole16(lflags);
	}
	else {
		flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	/*
	 * Emit the remaining segments as short BDs.  flags_type is written
	 * into the previous BD at the top of each iteration so that the
	 * PACKET_END flag can be OR'd into the last BD after the loop.
	 */
	for (; seg < pi->ipi_nsegs; seg++) {
		tbd->flags_type = htole16(flags_type);

		if (pi->ipi_new_pidx == txr->ring_size - 1)
			wrap = 1;
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		if (wrap && pi->ipi_new_pidx == 0)
			txr->epoch_bit = !txr->epoch_bit;
		if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
			txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;

		tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
		tbd->len = htole16(pi->ipi_segs[seg].ds_len);
		tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
		flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	/* Mark the final BD and advance past it. */
	flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
	tbd->flags_type = htole16(flags_type);
	if (pi->ipi_new_pidx == txr->ring_size - 1)
		wrap = 1;
	pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
	if (wrap && pi->ipi_new_pidx == 0)
		txr->epoch_bit = !txr->epoch_bit;
	if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
		txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;

	return 0;
}
208
209
static void
210
bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
211
{
212
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
213
struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
214
215
/* pidx is what we last set ipi_new_pidx to */
216
softc->db_ops.bnxt_db_tx(tx_ring, pidx);
217
return;
218
}
219
220
/*
 * iflib ift_txd_credits_update: count TX descriptors the hardware has
 * completed on queue txqid.
 *
 * Walks the TX completion ring with local copies of the consumer state.
 * Each TX_CMPL_TYPE_TX_L2 completion carries, in the top byte of its
 * opaque field, the number of BDs that packet consumed (stashed there
 * by bnxt_isc_txd_encap); these are summed into avail.  When clear is
 * true the consumed completions are acknowledged: the consumer state is
 * written back (pointing at the last fully-processed entry) and the
 * completion doorbell is rung.  When clear is false, the scan stops at
 * the first valid completion since iflib only needs a non-zero answer.
 */
static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
	struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	uint32_t raw_cons = cpr->raw_cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint32_t last_raw_cons;
	uint16_t type;
	uint16_t err;

	for (;;) {
		/*
		 * Snapshot the position before advancing so we can commit
		 * only up to the last fully-valid completion below.
		 */
		last_cons = cons;
		last_raw_cons = raw_cons;
		last_v_bit = v_bit;

		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		raw_cons++;
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmpl[cons], v_bit))
			goto done;

		type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
		switch (type) {
		case TX_CMPL_TYPE_TX_L2:
			err = (le16toh(cmpl[cons].errors_v) &
			    TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
			    TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
			if (err)
				device_printf(softc->dev,
				    "TX completion error %u\n", err);
			/* No need to byte-swap the opaque value */
			avail += cmpl[cons].opaque >> 24;
			/*
			 * If we're not clearing, iflib only cares if there's
			 * at least one buffer.  Don't scan the whole ring in
			 * this case.
			 */
			if (!clear)
				goto done;
			break;
		default:
			/* Odd completion types occupy two ring entries. */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				raw_cons++;
				if (!CMP_VALID(&cmpl[cons], v_bit)) {
					goto done;
				}
			}
			device_printf(softc->dev,
			    "Unhandled TX completion type %u\n", type);
			break;
		}
	}
done:

	if (clear && avail) {
		/* Commit the consumer state and ack via the doorbell. */
		cpr->cons = last_cons;
		cpr->raw_cons = last_raw_cons;
		cpr->v_bit = last_v_bit;
		softc->db_ops.bnxt_db_tx_cq(cpr, 0);
	}

	return avail;
}
291
292
static void
293
bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
294
{
295
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
296
struct bnxt_ring *rx_ring;
297
struct rx_prod_pkt_bd *rxbd;
298
uint16_t type;
299
uint16_t i;
300
uint16_t rxqid;
301
uint16_t count;
302
uint32_t pidx;
303
uint8_t flid;
304
uint64_t *paddrs;
305
qidx_t *frag_idxs;
306
307
rxqid = iru->iru_qsidx;
308
count = iru->iru_count;
309
pidx = iru->iru_pidx;
310
flid = iru->iru_flidx;
311
paddrs = iru->iru_paddrs;
312
frag_idxs = iru->iru_idxs;
313
314
if (flid == 0) {
315
rx_ring = &softc->rx_rings[rxqid];
316
type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
317
}
318
else {
319
rx_ring = &softc->ag_rings[rxqid];
320
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
321
}
322
rxbd = (void *)rx_ring->vaddr;
323
324
for (i=0; i<count; i++) {
325
rxbd[pidx].flags_type = htole16(type);
326
rxbd[pidx].len = htole16(softc->rx_buf_size);
327
/* No need to byte-swap the opaque value */
328
rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
329
| (frag_idxs[i]));
330
rxbd[pidx].addr = htole64(paddrs[i]);
331
332
/* Increment pidx and handle wrap-around */
333
if (++pidx == rx_ring->ring_size) {
334
pidx = 0;
335
rx_ring->epoch_bit = !rx_ring->epoch_bit;
336
}
337
if (pidx < EPOCH_ARR_SZ)
338
rx_ring->epoch_arr[pidx] = rx_ring->epoch_bit;
339
}
340
341
return;
342
}
343
344
static void
345
bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
346
qidx_t pidx)
347
{
348
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
349
struct bnxt_ring *rx_ring;
350
351
if (flid == 0)
352
rx_ring = &softc->rx_rings[rxqid];
353
else
354
rx_ring = &softc->ag_rings[rxqid];
355
356
/*
357
* We *must* update the completion ring before updating the RX ring
358
* or we will overrun the completion ring and the device will wedge for
359
* RX.
360
*/
361
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[rxqid], 0);
362
softc->db_ops.bnxt_db_rx(rx_ring, pidx);
363
return;
364
}
365
366
/*
 * iflib ift_rxd_available: report how many packets are ready on RX
 * queue rxqid, scanning at most slightly past budget.
 *
 * Walks the RX completion ring using local copies of the consumer index
 * and valid bit, so nothing is consumed here — bnxt_isc_rxd_pkt_get()
 * does the actual consumption.  An L2 or TPA-end completion (together
 * with its high half and any aggregation-buffer completions) counts as
 * one available packet; TPA-start and AGG completions count as none.
 */
static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
	struct rx_pkt_cmpl *rcp;
	struct rx_tpa_end_cmpl *rtpae;
	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	uint8_t ags;
	int i;
	uint16_t type;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmp[cons], v_bit))
			goto cmpl_invalid;

		type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
			rcp = (void *)&cmp[cons];
			ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
			    RX_PKT_CMPL_AGG_BUFS_SFT;
			/* Step over the high half of the completion. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;

			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_END:
			rtpae = (void *)&cmp[cons];
			ags = (rtpae->agg_bufs_v1 &
			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
			    RX_TPA_END_CMPL_AGG_BUFS_SFT;
			/* Step over the high half of the completion. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* Two entries, but not yet a deliverable packet. */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			break;
		case CMPL_BASE_TYPE_RX_AGG:
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d\n",
			    type, rxqid);

			/* Odd completion types use two completions */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);

				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			break;
		}
		if (avail > budget)
			break;
	}
cmpl_invalid:

	return avail;
}
460
461
static void
462
bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
463
{
464
uint8_t rss_profile_id;
465
466
rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
467
switch (rss_profile_id) {
468
case BNXT_RSS_HASH_TYPE_TCPV4:
469
ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
470
break;
471
case BNXT_RSS_HASH_TYPE_UDPV4:
472
ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
473
break;
474
case BNXT_RSS_HASH_TYPE_IPV4:
475
ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
476
break;
477
case BNXT_RSS_HASH_TYPE_TCPV6:
478
ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
479
break;
480
case BNXT_RSS_HASH_TYPE_UDPV6:
481
ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
482
break;
483
case BNXT_RSS_HASH_TYPE_IPV6:
484
ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
485
break;
486
default:
487
ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
488
break;
489
}
490
}
491
492
/*
 * Fill in ri from a plain L2 RX completion.
 *
 * On entry cpr->cons points at the low 16-byte completion.  This
 * consumes the low half, the high half, and one aggregation-buffer
 * completion per additional fragment, advancing cpr->cons/raw_cons past
 * everything used.  Each completion's opaque value decodes to the
 * free-list id and fragment index that bnxt_isc_rxd_refill() packed in.
 * Always returns 0.
 */
static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_pkt_cmpl *rcp;
	struct rx_pkt_cmpl_hi *rcph;
	struct rx_abuf_cmpl *acp;
	uint32_t flags2;
	uint32_t errors;
	uint8_t ags;
	int i;

	rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];

	/* Extract from the first 16-byte BD */
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(rcp->rss_hash);
		bnxt_set_rsstype(ri, rcp->rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
	    RX_PKT_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
	ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
	ri->iri_frags[0].irf_len = le16toh(rcp->len);
	ri->iri_len = le16toh(rcp->len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	cpr->raw_cons++;
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
	rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

	flags2 = le32toh(rcph->flags2);
	errors = le16toh(rcph->errors_v2);
	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(rcph->metadata) &
		    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
		    RX_PKT_CMPL_METADATA_PRI_MASK);
	}
	/* Report checksum results only when no error bit is set. */
	if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
			ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
	    RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
		    RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = 0xffff;
		}
	}

	/* And finally the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		cpr->raw_cons++;
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	return 0;
}
570
571
/*
 * Fill in ri from a TPA (LRO aggregation) end completion.
 *
 * On entry cpr->cons points at the TPA-end completion.  The matching
 * TPA-start low/high halves were saved into the ring's tpa_start[]
 * array by bnxt_isc_rxd_pkt_get(); the RSS, VLAN, and checksum data
 * come from there, while the fragment list combines the start buffer,
 * the aggregation buffers, and the buffer named by the end completion
 * itself.  Advances cpr->cons/raw_cons past everything consumed.
 * Always returns 0.
 */
static int
bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_tpa_end_cmpl *agend =
	    &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
	struct rx_abuf_cmpl *acp;
	struct bnxt_full_tpa_start *tpas;
	uint32_t flags2;
	uint8_t ags;
	uint8_t agg_id;
	int i;

	/* Get the agg_id */
	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
	    RX_TPA_END_CMPL_AGG_ID_SFT;
	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);

	/* Extract from the first 16-byte BD */
	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(tpas->low.rss_hash);
		bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
	ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
	ri->iri_len = le16toh(tpas->low.len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	cpr->raw_cons++;
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);

	flags2 = le32toh(tpas->high.flags2);
	if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(tpas->high.metadata) &
		    (RX_TPA_START_CMPL_METADATA_VID_MASK |
		    RX_TPA_START_CMPL_METADATA_DE |
		    RX_TPA_START_CMPL_METADATA_PRI_MASK);
	}
	/*
	 * Unlike the L2 path, no error bits are consulted here; the CALC
	 * flags alone mark the checksums as valid.
	 */
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		ri->iri_csum_flags |= CSUM_L4_VALID;
		ri->iri_csum_data = 0xffff;
	}

	/* Now the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		cpr->raw_cons++;
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
		ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	/* And finally, the empty BD at the end... */
	ri->iri_nfrags++;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
	ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
	ri->iri_frags[i].irf_len = le16toh(agend->len);
	ri->iri_len += le16toh(agend->len);

	return 0;
}
655
656
/* If we return anything but zero, iflib will assert... */
/*
 * iflib ift_rxd_pkt_get: consume completions for one received packet
 * and describe it in ri.
 *
 * Loops over the RX completion ring: L2 and TPA-end completions produce
 * a packet via bnxt_pkt_get_l2()/bnxt_pkt_get_tpa() (which return 0);
 * TPA-start completions are stashed in the ring's tpa_start[] array,
 * keyed by aggregation id, for the eventual TPA-end; anything else is
 * logged and skipped.  Advances cpr->cons/raw_cons and ri->iri_cidx as
 * it goes.
 */
static int
bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
	struct cmpl_base *cmp;
	struct rx_tpa_start_cmpl *rtpa;
	uint16_t flags_type;
	uint16_t type;
	uint8_t agg_id;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		cpr->raw_cons++;
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
		cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];

		flags_type = le16toh(cmp->type);
		type = flags_type & CMPL_BASE_TYPE_MASK;

		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
			return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_END:
			return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_START_V3:
			/*
			 * Save both halves of the TPA-start completion for
			 * bnxt_pkt_get_tpa() to use at TPA-end time.
			 */
			rtpa = (void *)&cmp_q[cpr->cons];
			agg_id = (rtpa->agg_id &
			    RX_TPA_START_CMPL_AGG_ID_MASK) >>
			    RX_TPA_START_CMPL_AGG_ID_SFT;
			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;

			NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
			cpr->raw_cons++;
			ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
			CMPL_PREFETCH_NEXT(cpr, cpr->cons);

			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
			    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d get\n",
			    type, ri->iri_qsidx);
			/* Odd completion types occupy two ring entries. */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
				    cpr->v_bit);
				cpr->raw_cons++;
				ri->iri_cidx = RING_NEXT(&cpr->ring,
				    ri->iri_cidx);
				CMPL_PREFETCH_NEXT(cpr, cpr->cons);
			}
			break;
		}
	}

	return 0;
}
719
720
static int
721
bnxt_intr(void *sc)
722
{
723
struct bnxt_softc *softc = (struct bnxt_softc *)sc;
724
725
device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);
726
return ENOSYS;
727
}
728
729