Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/bnxt/bnxt_en/if_bnxt.c
39536 views
1
/*-
2
* Broadcom NetXtreme-C/E network driver.
3
*
4
* Copyright (c) 2016 Broadcom, All Rights Reserved.
5
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26
* THE POSSIBILITY OF SUCH DAMAGE.
27
*/
28
29
#include <sys/param.h>
30
#include <sys/socket.h>
31
#include <sys/kernel.h>
32
#include <sys/bus.h>
33
#include <sys/module.h>
34
#include <sys/rman.h>
35
#include <sys/endian.h>
36
#include <sys/sockio.h>
37
#include <sys/priv.h>
38
39
#include <machine/bus.h>
40
#include <machine/resource.h>
41
42
#include <dev/pci/pcireg.h>
43
44
#include <net/if.h>
45
#include <net/if_dl.h>
46
#include <net/if_media.h>
47
#include <net/if_var.h>
48
#include <net/ethernet.h>
49
#include <net/iflib.h>
50
51
#define WANT_NATIVE_PCI_GET_SLOT
52
#include <linux/pci.h>
53
#include <linux/kmod.h>
54
#include <linux/module.h>
55
#include <linux/delay.h>
56
#include <linux/idr.h>
57
#include <linux/netdevice.h>
58
#include <linux/etherdevice.h>
59
#include <linux/rcupdate.h>
60
#include "opt_inet.h"
61
#include "opt_inet6.h"
62
#include "opt_rss.h"
63
64
#include "ifdi_if.h"
65
66
#include "bnxt.h"
67
#include "bnxt_hwrm.h"
68
#include "bnxt_ioctl.h"
69
#include "bnxt_sysctl.h"
70
#include "hsi_struct_def.h"
71
#include "bnxt_mgmt.h"
72
#include "bnxt_ulp.h"
73
#include "bnxt_auxbus_compat.h"
74
75
/*
 * PCI Device ID Table
 *
 * Vendor/device pairs this driver attaches to, each with the
 * human-readable description iflib reports on probe.  The table is also
 * referenced by IFLIB_PNP_INFO() and bnxt_sctx_init.isc_vendor_info.
 */

static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
	PVID(BROADCOM_VENDOR_ID, BCM57301,
	    "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57302,
	    "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57304,
	    "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57311,
	    "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57312,
	    "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57314,
	    "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57402,
	    "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	    "Broadcom BCM57402 NetXtreme-E Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57404,
	    "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	    "Broadcom BCM57404 NetXtreme-E Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57406,
	    "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	    "Broadcom BCM57406 NetXtreme-E Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57407,
	    "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	    "Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	    "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57412,
	    "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	    "Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	    "Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57414,
	    "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	    "Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	    "Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57416,
	    "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	    "Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	    "Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	    "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57417,
	    "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	    "Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	    "Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	    "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57454,
	    "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM58700,
	    "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57508,
	    "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57504,
	    "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
	    "Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57502,
	    "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57608,
	    "Broadcom BCM57608 NetXtreme-E 25Gb/50Gb/100Gb/200Gb/400Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57604,
	    "Broadcom BCM57604 NetXtreme-E 25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57602,
	    "Broadcom BCM57602 NetXtreme-E 25Gb/50Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57601,
	    "Broadcom BCM57601 NetXtreme-E 25Gb/50Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	    "Broadcom NetXtreme-C Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	    "Broadcom NetXtreme-C Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	    "Broadcom NetXtreme-C Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	    "Broadcom NetXtreme-E Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	    "Broadcom NetXtreme-E Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	    "Broadcom NetXtreme-E Ethernet Virtual Function"),
	/* required last entry */

	PVID_END
};
175
176
/*
 * Function prototypes
 */

/*
 * Global list of PF softcs and its element count.
 * NOTE(review): list population/consumption is not visible in this chunk;
 * presumably maintained at attach/detach time — confirm against the rest
 * of the file and bnxt_mgmt.
 */
SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
int bnxt_num_pfs = 0;

void
process_nq(struct bnxt_softc *softc, uint16_t nqid);
static void *bnxt_register(device_t dev);

/* Soft queue setup and teardown */
static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int ntxqs, int ntxqsets);
static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
    uint64_t *paddrs, int nrxqs, int nrxqsets);
static void bnxt_queues_free(if_ctx_t ctx);

/* Device setup and teardown */
static int bnxt_attach_pre(if_ctx_t ctx);
static int bnxt_attach_post(if_ctx_t ctx);
static int bnxt_detach(if_ctx_t ctx);

/* Device configuration */
static void bnxt_init(if_ctx_t ctx);
static void bnxt_stop(if_ctx_t ctx);
static void bnxt_multi_set(if_ctx_t ctx);
static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
static int bnxt_media_change(if_ctx_t ctx);
static int bnxt_promisc_set(if_ctx_t ctx, int flags);
static uint64_t bnxt_get_counter(if_ctx_t, ift_counter);
static void bnxt_update_admin_status(if_ctx_t ctx);
static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);

/* Interrupt enable / disable */
static void bnxt_intr_enable(if_ctx_t ctx);
static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static void bnxt_disable_intr(if_ctx_t ctx);
static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);

/* vlan support */
static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);

/* ioctl */
static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);

static int bnxt_shutdown(if_ctx_t ctx);
static int bnxt_suspend(if_ctx_t ctx);
static int bnxt_resume(if_ctx_t ctx);

/* Internal support functions */
static int bnxt_probe_phy(struct bnxt_softc *softc);
static void bnxt_add_media_types(struct bnxt_softc *softc);
static int bnxt_pci_mapping(struct bnxt_softc *softc);
static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
static int bnxt_handle_def_cp(void *arg);
static int bnxt_handle_isr(void *arg);
static void bnxt_clear_ids(struct bnxt_softc *softc);
static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
static void bnxt_def_cp_task(void *context, int pending);
static void bnxt_handle_async_event(struct bnxt_softc *softc,
    struct cmpl_base *cmpl);
static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
static void bnxt_get_wol_settings(struct bnxt_softc *softc);
static int bnxt_wol_config(if_ctx_t ctx);
static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
static void bnxt_get_port_module_status(struct bnxt_softc *softc);
static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
void bnxt_queue_sp_work(struct bnxt_softc *bp);

void bnxt_fw_reset(struct bnxt_softc *bp);
256
/*
 * Device Interface Declaration
 *
 * Everything except device_register is delegated to the generic iflib
 * entry points; bnxt_register() supplies the driver's shared context.
 */

static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
271
272
/* newbus driver description: name, method table and softc size. */
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};
275
276
/* Module registration, license and load-order dependencies. */
DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);

/* Export the PCI ID table for loader plug-and-play matching. */
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);

/*
 * Linux-style MMIO accessors over bus_space; bar_idx == 0 selects the
 * doorbell BAR, non-zero the HWRM BAR (see definitions below).
 */
void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
289
290
/*
 * Read a 32-bit register.  bar_idx == 0 selects the doorbell BAR,
 * anything else the HWRM BAR.
 */
u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
{

	if (bar_idx != 0)
		return bus_space_read_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off);
	return bus_space_read_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off);
}
298
299
/*
 * Write a 32-bit register as little-endian.  bar_idx == 0 selects the
 * doorbell BAR, anything else the HWRM BAR.
 */
void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
{
	u32 le_val = htole32(val);

	if (bar_idx != 0)
		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off, le_val);
	else
		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off, le_val);
}
307
308
/*
 * ID allocator for auxiliary devices.
 * NOTE(review): consumers are not visible in this chunk — presumably the
 * RDMA aux device code (bnxt_rdma_aux_device_init); confirm.
 */
static DEFINE_IDA(bnxt_aux_dev_ids);

/* iflib device-dependent method table; see ifdi_if.h for the contract. */
static device_method_t bnxt_iflib_methods[] = {
	/* Queue allocation / teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	/* Attach / detach */
	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	/* Runtime configuration */
	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	/* Interrupt control */
	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	/* VLAN */
	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	/* Power / lifecycle */
	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),

	DEVMETHOD_END
};
350
351
/* Driver description handed to iflib via bnxt_sctx_init.isc_driver. */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
354
355
/*
 * iflib shared context
 *
 * Static template returned from bnxt_register(); describes ring counts,
 * DMA sizing and feature flags to iflib.  Three rings per TX qset
 * (completion, TX, NQ) and three per RX qset (completion, RX, AG) —
 * see bnxt_tx_queues_alloc()/bnxt_rx_queues_alloc() for the layout.
 */

#define BNXT_DRIVER_VERSION	"230.0.133.0"
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,	// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    /* NQ depth 4096 */
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
395
396
/* Offset of the 16-bit subsystem device ID in PCI configuration space. */
#define PCI_SUBSYSTEM_ID 0x2e
/* Shared PF workqueue; creation and use are not visible in this chunk. */
static struct workqueue_struct *bnxt_pf_wq;

extern void bnxt_destroy_irq(struct bnxt_softc *softc);
400
401
/*
402
* Device Methods
403
*/
404
405
static void *
406
bnxt_register(device_t dev)
407
{
408
return (&bnxt_sctx_init);
409
}
410
411
static void
412
bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
413
{
414
415
if (softc->nq_rings)
416
return;
417
418
softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
419
M_DEVBUF, M_NOWAIT | M_ZERO);
420
}
421
422
static void
423
bnxt_nq_free(struct bnxt_softc *softc)
424
{
425
426
if (softc->nq_rings)
427
free(softc->nq_rings, M_DEVBUF);
428
softc->nq_rings = NULL;
429
}
430
431
432
/*
 * Program the doorbell epoch mask/shift for P7 chips; a no-op on older
 * generations.  db_ring_mask must already be set (ring_size - 1).
 * ring_type is currently unused.
 * NOTE(review): epoch semantics inferred from the DBR_EPOCH_SFT name —
 * confirm against the doorbell register definition.
 */
static void
bnxt_set_db_mask(struct bnxt_softc *bp, struct bnxt_ring *db,
    u32 ring_type)
{
	if (BNXT_CHIP_P7(bp)) {
		db->db_epoch_mask = db->db_ring_mask + 1;
		db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);

	}
}
442
443
/*
444
* Device Dependent Configuration Functions
445
*/
446
447
/* Soft queue setup and teardown */
448
static int
449
bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
450
uint64_t *paddrs, int ntxqs, int ntxqsets)
451
{
452
struct bnxt_softc *softc;
453
int i;
454
int rc;
455
456
softc = iflib_get_softc(ctx);
457
458
if (BNXT_CHIP_P5_PLUS(softc)) {
459
bnxt_nq_alloc(softc, ntxqsets);
460
if (!softc->nq_rings) {
461
device_printf(iflib_get_dev(ctx),
462
"unable to allocate NQ rings\n");
463
rc = ENOMEM;
464
goto nq_alloc_fail;
465
}
466
}
467
468
softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
469
M_DEVBUF, M_NOWAIT | M_ZERO);
470
if (!softc->tx_cp_rings) {
471
device_printf(iflib_get_dev(ctx),
472
"unable to allocate TX completion rings\n");
473
rc = ENOMEM;
474
goto cp_alloc_fail;
475
}
476
softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
477
M_DEVBUF, M_NOWAIT | M_ZERO);
478
if (!softc->tx_rings) {
479
device_printf(iflib_get_dev(ctx),
480
"unable to allocate TX rings\n");
481
rc = ENOMEM;
482
goto ring_alloc_fail;
483
}
484
485
for (i=0; i < ntxqsets; i++) {
486
rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
487
&softc->tx_stats[i], 0);
488
if (rc)
489
goto dma_alloc_fail;
490
bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
491
BUS_DMASYNC_PREREAD);
492
}
493
494
for (i = 0; i < ntxqsets; i++) {
495
/* Set up the completion ring */
496
softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
497
softc->tx_cp_rings[i].ring.phys_id =
498
(uint16_t)HWRM_NA_SIGNATURE;
499
softc->tx_cp_rings[i].ring.softc = softc;
500
softc->tx_cp_rings[i].ring.idx = i;
501
softc->tx_cp_rings[i].ring.id =
502
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
503
softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
504
softc->legacy_db_size: softc->tx_cp_rings[i].ring.id * 0x80;
505
softc->tx_cp_rings[i].ring.ring_size =
506
softc->scctx->isc_ntxd[0];
507
softc->tx_cp_rings[i].ring.db_ring_mask =
508
softc->tx_cp_rings[i].ring.ring_size - 1;
509
softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
510
softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
511
512
513
/* Set up the TX ring */
514
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
515
softc->tx_rings[i].softc = softc;
516
softc->tx_rings[i].idx = i;
517
softc->tx_rings[i].id =
518
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
519
softc->tx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
520
softc->legacy_db_size : softc->tx_rings[i].id * 0x80;
521
softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
522
softc->tx_rings[i].db_ring_mask = softc->tx_rings[i].ring_size - 1;
523
softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
524
softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
525
526
bnxt_create_tx_sysctls(softc, i);
527
528
if (BNXT_CHIP_P5_PLUS(softc)) {
529
/* Set up the Notification ring (NQ) */
530
softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
531
softc->nq_rings[i].ring.phys_id =
532
(uint16_t)HWRM_NA_SIGNATURE;
533
softc->nq_rings[i].ring.softc = softc;
534
softc->nq_rings[i].ring.idx = i;
535
softc->nq_rings[i].ring.id = i;
536
softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
537
softc->legacy_db_size : softc->nq_rings[i].ring.id * 0x80;
538
softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
539
softc->nq_rings[i].ring.db_ring_mask = softc->nq_rings[i].ring.ring_size - 1;
540
softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
541
softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
542
softc->nq_rings[i].type = Q_TYPE_TX;
543
}
544
}
545
546
softc->ntxqsets = ntxqsets;
547
return rc;
548
549
dma_alloc_fail:
550
for (i = i - 1; i >= 0; i--)
551
iflib_dma_free(&softc->tx_stats[i]);
552
free(softc->tx_rings, M_DEVBUF);
553
ring_alloc_fail:
554
free(softc->tx_cp_rings, M_DEVBUF);
555
cp_alloc_fail:
556
bnxt_nq_free(softc);
557
nq_alloc_fail:
558
return rc;
559
}
560
561
static void
562
bnxt_queues_free(if_ctx_t ctx)
563
{
564
struct bnxt_softc *softc = iflib_get_softc(ctx);
565
int i;
566
567
// Free TX queues
568
for (i=0; i<softc->ntxqsets; i++)
569
iflib_dma_free(&softc->tx_stats[i]);
570
free(softc->tx_rings, M_DEVBUF);
571
softc->tx_rings = NULL;
572
free(softc->tx_cp_rings, M_DEVBUF);
573
softc->tx_cp_rings = NULL;
574
softc->ntxqsets = 0;
575
576
// Free RX queues
577
for (i=0; i<softc->nrxqsets; i++)
578
iflib_dma_free(&softc->rx_stats[i]);
579
iflib_dma_free(&softc->hw_tx_port_stats);
580
iflib_dma_free(&softc->hw_rx_port_stats);
581
iflib_dma_free(&softc->hw_tx_port_stats_ext);
582
iflib_dma_free(&softc->hw_rx_port_stats_ext);
583
free(softc->grp_info, M_DEVBUF);
584
free(softc->ag_rings, M_DEVBUF);
585
free(softc->rx_rings, M_DEVBUF);
586
free(softc->rx_cp_rings, M_DEVBUF);
587
bnxt_nq_free(softc);
588
}
589
590
static int
591
bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
592
uint64_t *paddrs, int nrxqs, int nrxqsets)
593
{
594
struct bnxt_softc *softc;
595
int i;
596
int rc;
597
598
softc = iflib_get_softc(ctx);
599
600
softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
601
M_DEVBUF, M_NOWAIT | M_ZERO);
602
if (!softc->rx_cp_rings) {
603
device_printf(iflib_get_dev(ctx),
604
"unable to allocate RX completion rings\n");
605
rc = ENOMEM;
606
goto cp_alloc_fail;
607
}
608
softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
609
M_DEVBUF, M_NOWAIT | M_ZERO);
610
if (!softc->rx_rings) {
611
device_printf(iflib_get_dev(ctx),
612
"unable to allocate RX rings\n");
613
rc = ENOMEM;
614
goto ring_alloc_fail;
615
}
616
softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
617
M_DEVBUF, M_NOWAIT | M_ZERO);
618
if (!softc->ag_rings) {
619
device_printf(iflib_get_dev(ctx),
620
"unable to allocate aggregation rings\n");
621
rc = ENOMEM;
622
goto ag_alloc_fail;
623
}
624
softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
625
M_DEVBUF, M_NOWAIT | M_ZERO);
626
if (!softc->grp_info) {
627
device_printf(iflib_get_dev(ctx),
628
"unable to allocate ring groups\n");
629
rc = ENOMEM;
630
goto grp_alloc_fail;
631
}
632
633
for (i=0; i < nrxqsets; i++) {
634
rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
635
&softc->rx_stats[i], 0);
636
if (rc)
637
goto hw_stats_alloc_fail;
638
bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
639
BUS_DMASYNC_PREREAD);
640
}
641
642
/*
643
* Additional 512 bytes for future expansion.
644
* To prevent corruption when loaded with newer firmwares with added counters.
645
* This can be deleted when there will be no further additions of counters.
646
*/
647
#define BNXT_PORT_STAT_PADDING 512
648
649
rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
650
&softc->hw_rx_port_stats, 0);
651
if (rc)
652
goto hw_port_rx_stats_alloc_fail;
653
654
bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
655
softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
656
657
658
rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
659
&softc->hw_tx_port_stats, 0);
660
if (rc)
661
goto hw_port_tx_stats_alloc_fail;
662
663
bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
664
softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
665
666
softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
667
softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
668
669
670
rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
671
&softc->hw_rx_port_stats_ext, 0);
672
if (rc)
673
goto hw_port_rx_stats_ext_alloc_fail;
674
675
bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
676
softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
677
678
rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
679
&softc->hw_tx_port_stats_ext, 0);
680
if (rc)
681
goto hw_port_tx_stats_ext_alloc_fail;
682
683
bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
684
softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
685
686
softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
687
softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
688
689
for (i = 0; i < nrxqsets; i++) {
690
/* Allocation the completion ring */
691
softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
692
softc->rx_cp_rings[i].ring.phys_id =
693
(uint16_t)HWRM_NA_SIGNATURE;
694
softc->rx_cp_rings[i].ring.softc = softc;
695
softc->rx_cp_rings[i].ring.idx = i;
696
softc->rx_cp_rings[i].ring.id = i + 1;
697
softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
698
softc->legacy_db_size : softc->rx_cp_rings[i].ring.id * 0x80;
699
/*
700
* If this ring overflows, RX stops working.
701
*/
702
softc->rx_cp_rings[i].ring.ring_size =
703
softc->scctx->isc_nrxd[0];
704
softc->rx_cp_rings[i].ring.db_ring_mask =
705
softc->rx_cp_rings[i].ring.ring_size - 1;
706
707
softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
708
softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
709
710
/* Allocate the RX ring */
711
softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
712
softc->rx_rings[i].softc = softc;
713
softc->rx_rings[i].idx = i;
714
softc->rx_rings[i].id = i + 1;
715
softc->rx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
716
softc->legacy_db_size : softc->rx_rings[i].id * 0x80;
717
softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
718
softc->rx_rings[i].db_ring_mask =
719
softc->rx_rings[i].ring_size -1;
720
softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
721
softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
722
723
/* Allocate the TPA start buffer */
724
softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
725
(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
726
M_DEVBUF, M_NOWAIT | M_ZERO);
727
if (softc->rx_rings[i].tpa_start == NULL) {
728
rc = -ENOMEM;
729
device_printf(softc->dev,
730
"Unable to allocate space for TPA\n");
731
goto tpa_alloc_fail;
732
}
733
/* Allocate the AG ring */
734
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
735
softc->ag_rings[i].softc = softc;
736
softc->ag_rings[i].idx = i;
737
softc->ag_rings[i].id = nrxqsets + i + 1;
738
softc->ag_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
739
softc->legacy_db_size : softc->ag_rings[i].id * 0x80;
740
softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
741
softc->ag_rings[i].db_ring_mask = softc->ag_rings[i].ring_size - 1;
742
softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
743
softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
744
745
/* Allocate the ring group */
746
softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
747
softc->grp_info[i].stats_ctx =
748
softc->rx_cp_rings[i].stats_ctx_id;
749
softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
750
softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
751
softc->grp_info[i].cp_ring_id =
752
softc->rx_cp_rings[i].ring.phys_id;
753
754
bnxt_create_rx_sysctls(softc, i);
755
}
756
757
/*
758
* When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
759
* HWRM every sec with which firmware timeouts can happen
760
*/
761
if (BNXT_PF(softc))
762
bnxt_create_port_stats_sysctls(softc);
763
764
/* And finally, the VNIC */
765
softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
766
softc->vnic_info.filter_id = -1;
767
softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
768
softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
769
softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
770
softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
771
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
772
softc->vnic_info.mc_list_count = 0;
773
softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
774
rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
775
&softc->vnic_info.mc_list, 0);
776
if (rc)
777
goto mc_list_alloc_fail;
778
779
/* The VNIC RSS Hash Key */
780
rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
781
&softc->vnic_info.rss_hash_key_tbl, 0);
782
if (rc)
783
goto rss_hash_alloc_fail;
784
bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
785
softc->vnic_info.rss_hash_key_tbl.idi_map,
786
BUS_DMASYNC_PREWRITE);
787
memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
788
softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
789
790
/* Allocate the RSS tables */
791
rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
792
&softc->vnic_info.rss_grp_tbl, 0);
793
if (rc)
794
goto rss_grp_alloc_fail;
795
bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
796
softc->vnic_info.rss_grp_tbl.idi_map,
797
BUS_DMASYNC_PREWRITE);
798
memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
799
softc->vnic_info.rss_grp_tbl.idi_size);
800
801
softc->nrxqsets = nrxqsets;
802
return rc;
803
804
rss_grp_alloc_fail:
805
iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
806
rss_hash_alloc_fail:
807
iflib_dma_free(&softc->vnic_info.mc_list);
808
mc_list_alloc_fail:
809
for (i = i - 1; i >= 0; i--) {
810
if (softc->rx_rings[i].tpa_start)
811
free(softc->rx_rings[i].tpa_start, M_DEVBUF);
812
}
813
tpa_alloc_fail:
814
iflib_dma_free(&softc->hw_tx_port_stats_ext);
815
hw_port_tx_stats_ext_alloc_fail:
816
iflib_dma_free(&softc->hw_rx_port_stats_ext);
817
hw_port_rx_stats_ext_alloc_fail:
818
iflib_dma_free(&softc->hw_tx_port_stats);
819
hw_port_tx_stats_alloc_fail:
820
iflib_dma_free(&softc->hw_rx_port_stats);
821
hw_port_rx_stats_alloc_fail:
822
for (i=0; i < nrxqsets; i++) {
823
if (softc->rx_stats[i].idi_vaddr)
824
iflib_dma_free(&softc->rx_stats[i]);
825
}
826
hw_stats_alloc_fail:
827
free(softc->grp_info, M_DEVBUF);
828
grp_alloc_fail:
829
free(softc->ag_rings, M_DEVBUF);
830
ag_alloc_fail:
831
free(softc->rx_rings, M_DEVBUF);
832
ring_alloc_fail:
833
free(softc->rx_cp_rings, M_DEVBUF);
834
cp_alloc_fail:
835
return rc;
836
}
837
838
static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
839
{
840
if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
841
iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
842
softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
843
}
844
845
static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
846
{
847
int rc;
848
849
rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
850
&softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
851
852
return rc;
853
}
854
855
/*
 * Release all memory behind a ring descriptor: the data pages, the
 * page table (if a multi-page/multi-level layout was used) and the
 * driver's shadow vmem area.  Freed entries are NULLed so a repeated
 * call is harmless.
 */
static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i].idi_vaddr)
			continue;

		iflib_dma_free(&rmem->pg_arr[i]);
		rmem->pg_arr[i].idi_vaddr = NULL;
	}
	if (rmem->pg_tbl.idi_vaddr) {
		iflib_dma_free(&rmem->pg_tbl);
		rmem->pg_tbl.idi_vaddr = NULL;

	}
	if (rmem->vmem_size && *rmem->vmem) {
		free(*rmem->vmem, M_DEVBUF);
		*rmem->vmem = NULL;
	}
}
876
877
/*
 * Pre-initialize newly allocated context memory with the firmware-required
 * init value: either fill the whole buffer, or write one byte at
 * init_offset within each entry when a specific offset is given.
 */
static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{
	u8 init_val = ctxm->init_value;
	u16 offset = ctxm->init_offset;
	u8 *p2 = p;
	int i;

	/* A zero init value means no initialization is required. */
	if (!init_val)
		return;
	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
		memset(p, init_val, len);
		return;
	}
	for (i = 0; i < len; i += ctxm->entry_size)
		*(p2 + i + offset) = init_val;
}
893
894
/*
 * Allocate the DMA pages backing a ring and, for multi-page or indirect
 * rings, the page table that points at them.  PTEs are tagged with
 * valid/next-to-last/last bits when the flags request it.  Also allocates
 * the software descriptor area (vmem) when vmem_size is non-zero.
 * Returns 0 on success or -ENOMEM; partial allocations are left for the
 * caller to unwind via bnxt_free_ring().
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* An indirect page table is needed for multi-page or deep rings. */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		size_t pg_tbl_size = rmem->nr_pages * 8;	/* 8 bytes per PTE */

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* Firmware may require context memory to be pre-initialized. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
			    rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/* Ring PTEs mark the tail pages for the hardware. */
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i] = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
946
947
948
/* Default set of backing-store context types enabled in the CFG request. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
	(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
	 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
954
955
/*
 * Allocate one block of firmware context memory described by ctx_pg via
 * the common ring allocator; pages get valid PTEs, and full-page tables
 * are used when the block is itself indirect (depth >= 1).
 */
static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	if (rmem->depth >= 1)
		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;

	return bnxt_alloc_ring(softc, rmem);
}
968
969
/*
 * Allocate mem_size bytes of firmware context memory for ctx_pg.  Requests
 * fitting within MAX_CTX_PAGES (and depth <= 1) use a single indirection
 * level; larger requests build a two-level table where each second-level
 * table covers up to MAX_CTX_PAGES pages.  Returns 0, -EINVAL for bad or
 * oversized sizes, or -ENOMEM.  Partially allocated state is torn down
 * later by bnxt_free_ctx_pg_tbls().
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		/* Array of pointers to the second-level table descriptors. */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Level 1: one page of PTEs per second-level table. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/* The level-1 page doubles as this table's pg_tbl. */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			/* The last table may cover fewer pages. */
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
1028
1029
/*
 * Free all context memory owned by ctx_pg, including the second-level
 * tables of a two-level layout, then the level-1/flat ring memory itself.
 * Safe to call on partially allocated state.
 */
static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(softc, rmem2);
			/* Level-1 page was aliased into pg_tbl; drop our copy. */
			ctx_pg->ctx_arr[i].idi_vaddr = NULL;
			free(pg_tbl , M_DEVBUF);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(softc, rmem);
	ctx_pg->nr_pages = 0;
}
1057
1058
/*
 * Size and allocate the backing store for one context type: round the
 * entry count per the type's constraints, then allocate one page-table
 * set per instance (instance_bmap).  Marks the type valid on success.
 */
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
				   u8 pg_lvl)
{
	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
	int i, rc = 0, n = 1;
	u32 mem_size;

	if (!ctxm->entry_size || !ctx_pg)
		return -EINVAL;
	if (ctxm->instance_bmap)
		n = hweight32(ctxm->instance_bmap);
	if (ctxm->entry_multiple)
		entries = roundup(entries, ctxm->entry_multiple);
	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
	mem_size = entries * ctxm->entry_size;
	for (i = 0; i < n && !rc; i++) {
		ctx_pg[i].entries = entries;
		/* Only pass ctxm through when firmware wants pre-init. */
		rc = bnxt_alloc_ctx_pg_tbls(softc, &ctx_pg[i], mem_size, pg_lvl,
					    ctxm->init_value ? ctxm : NULL);
	}
	if (!rc)
		ctxm->mem_valid = 1;
	return rc;
}
1083
1084
/*
 * Free all firmware backing-store context memory: every instance of every
 * context type, the per-type page info arrays, and finally the top-level
 * context descriptor itself.
 */
static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
{
	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
	u16 type;

	if (!ctx)
		return;

	for (type = 0; type < BNXT_CTX_MAX; type++) {
		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
		int i, n = 1;

		if (!ctx_pg)
			continue;
		if (ctxm->instance_bmap)
			n = hweight32(ctxm->instance_bmap);
		for (i = 0; i < n; i++)
			bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);

		kfree(ctx_pg);
		ctxm->pg_info = NULL;
	}

	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
	kfree(ctx);
	softc->ctx_mem = NULL;
}
1112
1113
/*
 * Configure backing-store memory with firmware using the V2 (per-type)
 * interface.  For PFs, first allocate trace-buffer context types, then
 * issue one CFG_V2 call per valid type, flagging the last type so the
 * firmware knows the sequence is complete.
 */
static int
bnxt_backing_store_cfg_v2(struct bnxt_softc *softc, u32 ena)
{
	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
	struct bnxt_ctx_mem_type *ctxm;
	u16 last_type = BNXT_CTX_INV;
	int rc = 0;
	u16 type;

	if (BNXT_PF(softc)) {
		for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) {
			ctxm = &ctx->ctx_arr[type];
			if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
				continue;
			rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
			if (rc) {
				/* Trace buffers are optional; keep going. */
				device_printf(softc->dev, "Unable to setup ctx page for type:0x%x.\n", type);
				rc = 0;
				continue;
			}
			/* ckp TODO: this is trace buffer related stuff, so keeping it disabled now. needs revisit */
			//bnxt_bs_trace_init(bp, ctxm, type - BNXT_CTX_SRT_TRACE);
			last_type = type;
		}
	}

	/* If no trace type was set up, pick the last type from ena bits. */
	if (last_type == BNXT_CTX_INV) {
		if (!ena)
			return 0;
		else if (ena & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM)
			last_type = BNXT_CTX_MAX - 1;
		else
			last_type = BNXT_CTX_L2_MAX - 1;
	}
	ctx->ctx_arr[last_type].last = 1;

	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
		ctxm = &ctx->ctx_arr[type];

		if (!ctxm->mem_valid)
			continue;
		rc = bnxt_hwrm_func_backing_store_cfg_v2(softc, ctxm, ctxm->last);
		if (rc)
			return rc;
	}
	return 0;
}
1160
1161
/*
 * Allocate and configure all firmware backing-store context memory for
 * P5+ chips: query capabilities, size QP/SRQ/CQ/VNIC/STAT (plus MRAV/TIM
 * when RoCE is capable, with extra entries reserved for RDMA), then the
 * slow- and fast-path TQM rings, and finally push the configuration to
 * firmware (V2 interface on P7 when supported).  Returns 0 when context
 * memory is not needed or already initialized.
 */
static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_type *ctxm;
	struct bnxt_ctx_mem_info *ctx;
	u32 l2_qps, qp1_qps, max_qps;
	u32 ena, entries_sp, entries;
	u32 srqs, max_srqs, min;
	u32 num_mr, num_ah;
	u32 extra_srqs = 0;
	u32 extra_qps = 0;
	u8 pg_lvl = 1;
	int i, rc;

	/* Only P5+ chips use host-backed context memory. */
	if (!BNXT_CHIP_P5_PLUS(softc))
		return 0;

	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
	if (rc) {
		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
		    rc);
		return rc;
	}
	ctx = softc->ctx_mem;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	ena = 0;
	/* VFs skip the legacy per-type sizing; the PF owns it. */
	if (BNXT_VF(softc))
		goto skip_legacy;

	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
	l2_qps = ctxm->qp_l2_entries;
	qp1_qps = ctxm->qp_qp1_entries;
	max_qps = ctxm->max_entries;
	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
	srqs = ctxm->srq_l2_entries;
	max_srqs = ctxm->max_entries;
	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
		/* RoCE needs deeper tables and extra QP/SRQ headroom. */
		pg_lvl = 2;
		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
	}

	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
				     pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
				     extra_qps * 2, pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
	if (rc)
		return rc;

	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
	ctx_pg = ctxm->pg_info;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
	num_ah = min_t(u32, num_mr, 1024 * 128);
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
	if (rc)
		return rc;
	ctx_pg->entries = num_mr + num_ah;
	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
	/* When units apply, encode MR and AH counts in one field. */
	if (ctxm->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctxm->mrav_num_entries_units) << 16) |
			(num_ah / ctxm->mrav_num_entries_units);

	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
	if (rc)
		return rc;
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;

skip_rdma:
	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
	min = ctxm->min_entries;
	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
		     2 * (extra_qps + qp1_qps) + min;
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
	entries = l2_qps + 2 * (extra_qps + qp1_qps);
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
	if (rc)
		return rc;
	/* Enable each TQM ring; ring 8 and beyond share one enable bit. */
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
		else
			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
	}
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;

skip_legacy:
	if (BNXT_CHIP_P7(softc)) {
		if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
			rc = bnxt_backing_store_cfg_v2(softc, ena);
	} else {
		rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
	}
	if (rc) {
		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
		    rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;

	return 0;
}
1296
1297
/*
 * If we update the index, a write barrier is needed after the write to ensure
 * the completion ring has space before the RX/TX ring does. Since we can't
 * make the RX and AG doorbells covered by the same barrier without remapping
 * MSI-X vectors, we create the barrier over the entire doorbell bar.
 * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
 * for a single ring group.
 *
 * A barrier of just the size of the write is used to ensure the ordering
 * remains correct and no writes are lost.
 */
1308
1309
/* Ring the legacy (Cu+Wh) RX producer doorbell with the new index. */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior descriptor writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole32(RX_DOORBELL_KEY_RX | idx));
}
1319
1320
/* Ring the legacy (Cu+Wh) TX producer doorbell with the new index. */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	/* Order prior descriptor writes before the doorbell write. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole32(TX_DOORBELL_KEY_TX | idx));
}
1330
1331
/*
 * Ring the legacy completion-queue doorbell: acknowledge the current
 * consumer index (when one is valid) and arm or mask the interrupt.
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	/* UINT32_MAX means no completion consumed yet: send no index. */
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole32(CMPL_DOORBELL_KEY_CMPL |
	    ((cpr->cons == UINT32_MAX) ? 0 :
	    (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
	    ((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	/* See the barrier rationale comment above this function group. */
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1346
1347
/* Ring a Thor (P5) RX doorbell: 64-bit write carrying the ring's XID. */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1358
1359
/* Ring a Thor (P5) TX doorbell: 64-bit write carrying the ring's XID. */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT)));
}
1370
1371
/*
 * Ring a Thor (P5) RX completion-queue doorbell: acknowledge up to the
 * next consumer index and arm-all or leave the interrupt unarmed.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	/* UINT32_MAX means nothing consumed yet; otherwise ack one past. */
	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1396
1397
/*
 * Ring a Thor (P5) TX completion-queue doorbell with the current consumer
 * index, arming or leaving the interrupt unarmed.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1417
1418
/*
 * Ring a Thor (P5) notification-queue doorbell with the current consumer
 * index, arming or leaving the NQ interrupt unarmed.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1438
1439
/*
 * Ring a Thor2 (P7) RX doorbell.  The 64-bit message carries the valid
 * bit, the ring XID, and the producer index combined with its epoch bit.
 * Out-of-range indices are rejected with a diagnostic.
 */
static void
bnxt_thor2_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SRQ | DBR_VALID | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(db_val));
}
1462
1463
/*
 * Ring a Thor2 (P7) TX doorbell.  Same layout as the RX variant but with
 * the SQ doorbell type.  Out-of-range indices are rejected.
 */
static void
bnxt_thor2_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(db_val));
}
1486
1487
/*
 * Ring a Thor2 (P7) RX completion-queue doorbell.  Acknowledges raw_cons
 * and, when arming, includes the CQ toggle bit required by P7 hardware.
 */
static void
bnxt_thor2_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	/* UINT32_MAX means no completion has been consumed yet. */
	if (cons == UINT32_MAX)
		cons = 0;

	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1517
1518
/*
 * Ring a Thor2 (P7) TX completion-queue doorbell.  Acknowledges raw_cons
 * and, when arming, includes the CQ toggle bit required by P7 hardware.
 */
static void
bnxt_thor2_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1545
1546
/*
 * Ring a Thor2 (P7) notification-queue doorbell, arming (with toggle) or
 * masking the NQ interrupt.
 */
static void
bnxt_thor2_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_NQ_ARM;
	else
		db_msg |= DBR_TYPE_NQ_MASK;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1573
1574
/*
 * Look up a softc on the global PF list, either by interface name (when
 * dev_name is non-NULL) or by PCI domain/bus/device-function tuple.
 * Returns NULL when no entry matches.
 */
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
	struct bnxt_softc_list *entry = NULL;

	SLIST_FOREACH(entry, &pf_list, next) {
		struct bnxt_softc *bp = entry->softc;

		if (dev_name) {
			/* Name-based lookup: compare against the ifnet name. */
			if (!strncmp(dev_name,
			    if_name(iflib_get_ifp(bp->ctx)), BNXT_MAX_STR))
				return bp;
			continue;
		}

		/* DBDF-based lookup: match domain, bus, and dev/function. */
		if (domain == bp->domain && bus == bp->bus &&
		    dev_fn == bp->dev_fn)
			return bp;
	}

	return NULL;
}
1595
1596
1597
static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1598
{
1599
uint8_t i, lltc = 0;
1600
1601
if (!softc->max_lltc)
1602
return;
1603
1604
/* Verify that lossless TX and RX queues are in the same index */
1605
for (i = 0; i < softc->max_tc; i++) {
1606
if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1607
BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1608
lltc++;
1609
}
1610
softc->max_lltc = min(softc->max_lltc, lltc);
1611
}
1612
1613
/*
 * Poll firmware readiness with HWRM_VER_GET.  Returns 0 when the firmware
 * responds and reports ready, -EAGAIN when it responds but is not yet
 * ready, or the send error.
 */
static int bnxt_hwrm_poll(struct bnxt_softc *bp)
{
	struct hwrm_ver_get_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = _hwrm_send_message(bp, &req, sizeof(req));
	if (rc)
		return rc;

	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
		rc = -EAGAIN;

	return rc;
}
1635
1636
/* Acquire rtnl from the sp task without deadlocking against bnxt_close(). */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set. If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1646
1647
/* Re-mark the sp task active and release rtnl (pairs with bnxt_rtnl_lock_sp). */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1652
1653
/*
 * Quiesce the device on a fatal firmware error: mask interrupts and
 * disable the PCI device to stop further DMA.
 */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1659
1660
/*
 * Read one firmware health register.  The register descriptor encodes
 * both the access type (config space, GRC window, BAR0, BAR1) and the
 * offset; GRC registers go through the pre-mapped window.  The reset
 * in-progress register is masked to its meaningful bits.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* GRC reads go through the window mapped into BAR0. */
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1686
1687
/*
 * Tear the device down in preparation for a firmware reset: stop ULPs,
 * quiesce on fatal errors, stop the interface, unregister the driver
 * with firmware, free per-queue interrupts, disable PCI, and release
 * context memory.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		/* All-ones config read means the device fell off the bus. */
		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* P5+ chips take interrupts on NQs; older chips on RX CP rings. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5_PLUS(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1721
1722
static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1723
{
1724
struct bnxt_fw_health *fw_health = bp->fw_health;
1725
bool no_heartbeat = false, has_reset = false;
1726
u32 val;
1727
1728
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1729
if (val == fw_health->last_fw_heartbeat)
1730
no_heartbeat = true;
1731
1732
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1733
if (val != fw_health->last_fw_reset_cnt)
1734
has_reset = true;
1735
1736
if (!no_heartbeat && has_reset)
1737
return true;
1738
1739
return false;
1740
}
1741
1742
/*
 * Kick off the firmware reset state machine: close the device, pick the
 * initial state and poll interval based on whether the firmware supports
 * error-recovery reload, and queue the reset work.  No-op unless the
 * device is open and no reset is already in flight.
 */
void bnxt_fw_reset(struct bnxt_softc *bp)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		int tmo;
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_fw_reset_close(bp);

		if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ /10;
		}
		bnxt_queue_fw_reset_work(bp, tmo);
	}
	bnxt_rtnl_unlock_sp(bp);
}
1762
1763
static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1764
{
1765
if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1766
return;
1767
1768
if (BNXT_PF(bp))
1769
queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1770
else
1771
schedule_delayed_work(&bp->fw_reset_task, delay);
1772
}
1773
1774
void bnxt_queue_sp_work(struct bnxt_softc *bp)
1775
{
1776
if (BNXT_PF(bp))
1777
queue_work(bnxt_pf_wq, &bp->sp_task);
1778
else
1779
schedule_work(&bp->sp_task);
1780
}
1781
1782
/*
 * Execute one step of the host-driven firmware reset sequence: write the
 * step's value to its register (config space, GRC via window, BAR0 or
 * BAR1), then optionally flush and delay as the sequence requires.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Point the GRC window at the register's base, then fall
		 * through to write via the window in BAR0.
		 */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* Config read acts as a flush before the mandated delay. */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1812
1813
/*
 * Perform the full chip reset per the error-recovery capabilities: with
 * ERR_RECOVER_RELOAD the firmware resets itself; otherwise the host
 * either replays the register write sequence or asks the co-processor to
 * reset via HWRM_FW_RESET.  Records the reset timestamp in all cases.
 */
static void bnxt_reset_all(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
		/* Host-driven reset: replay the recorded write sequence. */
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
		req.target_id = htole16(HWRM_TARGET_ID_KONG);
		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
		rc = hwrm_send_message(bp, &req, sizeof(req));

		/* -ENODEV is expected while the device resets underneath us. */
		if (rc != -ENODEV)
			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}
1841
1842
/*
 * Allocate and initialize the firmware health tracking structure.
 * Idempotent: returns 0 immediately when it already exists.
 */
static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
{
	if (bp->fw_health)
		return 0;

	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
	if (!bp->fw_health)
		return -ENOMEM;

	mutex_init(&bp->fw_health->lock);
	return 0;
}
1854
1855
static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1856
{
1857
int rc;
1858
1859
if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1860
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1861
return 0;
1862
1863
rc = __bnxt_alloc_fw_health(bp);
1864
if (rc) {
1865
bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
1866
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
1867
return rc;
1868
}
1869
1870
return 0;
1871
}
1872
1873
/* Point the health-monitoring GRC window at the given register base. */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
1877
1878
/*
 * Pre-map the four GRC health-monitoring registers through a shared GRC
 * window so they can be read quickly later.  All GRC registers must share
 * one window base; returns -ERANGE otherwise.  Marks status/reset reads
 * reliable only after the mapping succeeds.
 */
static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	bp->fw_health->status_reliable = false;
	bp->fw_health->resets_reliable = false;
	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		/* All GRC registers must fit in one window. */
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	bp->fw_health->status_reliable = true;
	bp->fw_health->resets_reliable = true;
	/* No GRC registers at all: nothing to map. */
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}
1906
1907
static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
1908
{
1909
struct bnxt_fw_health *fw_health = bp->fw_health;
1910
u32 reg_type;
1911
1912
if (!fw_health)
1913
return;
1914
1915
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
1916
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1917
fw_health->status_reliable = false;
1918
1919
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
1920
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1921
fw_health->resets_reliable = false;
1922
}
1923
1924
/*
 * Query the firmware's error recovery configuration (HWRM
 * ERROR_RECOVERY_QCFG) and cache it in bp->fw_health.
 *
 * Populates polling/wait intervals, the four monitored health registers,
 * and the register write sequence used to reset the chip from the host.
 * On any failure the BNXT_FW_CAP_ERROR_RECOVERY capability is cleared so
 * the rest of the driver stops attempting recovery.
 *
 * Returns 0 on success, -EINVAL on an inconsistent response, or the
 * HWRM/mapping error code.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	/* NOTE(review): _hwrm_send_message is the unlocked variant —
	 * confirm the HWRM lock is held by the caller here. */
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* CO_CPU-driven recovery requires the KONG mailbox channel. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	/* Timing parameters (all in deci-seconds, per field names). */
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
	    le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
	    le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
	    le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
	    le32toh(resp->max_bailout_time_after_reset);
	/* The four registers monitored for health/reset progress. */
	fw_health->regs[BNXT_FW_HEALTH_REG] =
	    le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
	    le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
	    le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
	    le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
	    le32toh(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	/* NOTE(review): 16 is presumably the capacity of the
	 * fw_reset_seq_* arrays / resp->reset_reg[] — confirm and
	 * consider using a named constant. */
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	/* Register write sequence the host replays to reset the chip. */
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
		    le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
		    le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
		    le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
1985
1986
/*
 * Register this driver instance with the firmware.
 *
 * Error-recovery support is probed first (best effort: failures are only
 * logged) because the recovery capability influences what is reported in
 * the DRV_RGTR request.  The registration itself is mandatory: failure
 * is mapped to -ENODEV.
 */
static int bnxt_drv_rgtr(struct bnxt_softc *bp)
{
	int rc;

	/* determine whether we can support error recovery before
	 * registering with FW
	 */
	if (bnxt_alloc_fw_health(bp)) {
		device_printf(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
				      rc);
	}
	/* NOTE(review): async-event registration is skipped here
	 * (NULL bitmap, false) — confirm whether the FW default event
	 * set is sufficient for this driver. */
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
	if (rc)
		return -ENODEV;
	return 0;
}
2006
2007
static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
2008
{
2009
return time_after(jiffies, bp->fw_reset_timestamp +
2010
(bp->fw_reset_max_dsecs * HZ / 10));
2011
}
2012
2013
/*
 * Re-open the device after a firmware reset: re-query queue/function
 * configuration, re-register with firmware, re-allocate context memory,
 * and restart the iflib interrupt/datapath machinery.
 *
 * Mirrors the relevant portion of bnxt_attach_pre() for the re-init path.
 * Returns 0 on success or the first fatal HWRM error.
 */
static int bnxt_open(struct bnxt_softc *bp)
{
	int rc = 0;
	/* NOTE(review): this rc is overwritten by the next call, so an
	 * NVM info failure is silently ignored — confirm intentional. */
	if (BNXT_PF(bp))
		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
		    &bp->nvm_info->device_id, &bp->nvm_info->sector_size,
		    &bp->nvm_info->size, &bp->nvm_info->reserved_size,
		    &bp->nvm_info->available_size);

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
		return rc;
	}
	if (bp->is_asym_q) {
		/* Asymmetric TX/RX queueing: RX has its own qportcfg. */
		rc = bnxt_hwrm_queue_qportcfg(bp,
		    HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(bp->dev, "re-init: hwrm qportcfg (rx) failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(bp);
	} else {
		/* Symmetric: RX mirrors the TX queue configuration. */
		bp->rx_max_q = bp->tx_max_q;
		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
	}
	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	/* Register the driver with the FW */
	rc = bnxt_drv_rgtr(bp);
	if (rc)
		return rc;
	/* Context memory is only a concept on HWRM spec >= 1.8.3. */
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc) {
			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	if (BNXT_CHIP_P5_PLUS(bp))
		bnxt_hwrm_reserve_pf_rings(bp);
	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
		return rc;
	}

	/* Re-arm iflib: MSI-X assignment, ring init, interrupt enable. */
	bnxt_msix_intr_assign(bp->ctx, 0);
	bnxt_init(bp->ctx);
	bnxt_intr_enable(bp->ctx);

	/* If a FW reset was detected (not driver initiated), restart the
	 * ULP now; the driver-initiated path restarts it in the reset task. */
	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
			bnxt_ulp_start(bp, 0);
		}
	}

	device_printf(bp->dev, "Network interface is UP and operational\n");

	return rc;
}
2084
static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
2085
{
2086
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
2087
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
2088
bnxt_ulp_start(bp, rc);
2089
}
2090
bp->fw_reset_state = 0;
2091
}
2092
2093
/*
 * Firmware reset state machine, run from the delayed work queue.
 *
 * Progresses through: POLL_FW_DOWN (wait for FW to signal shutdown) ->
 * RESET_FW (primary function writes the reset sequence) -> ENABLE_DEV
 * (wait for PCI config space to come back, re-enable the device) ->
 * POLL_FW (ping FW until HWRM responds) -> OPENING (re-open the
 * interface and restart the ULP).  Each wait re-queues itself with an
 * appropriate delay; timeouts abort the reset.
 *
 * NOTE(review): the OPENING case and the abort paths call rtnl_unlock()
 * without a visible rtnl_lock() in this function — presumably the lock
 * is taken on the path that queued this state; confirm the pairing.
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		/* Wait for the firmware to report shutdown, re-polling
		 * every HZ/5 ticks until it does or we time out. */
		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		/* Non-primary functions do not perform the reset; they
		 * simply wait for the primary to finish. */
		if (!bp->fw_health->primary) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
	fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		/* Fatal resets with no minimum wait: poll PCI config space
		 * until it reads something other than all-ones (i.e. the
		 * device is back on the bus). */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		/* Use a short HWRM timeout while pinging; restore the
		 * default once the firmware answers. */
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			return;
		}

		/* Resync the baseline reset counter for health monitoring. */
		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
			    bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Order the state-field write before clearing the bit. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	/* Dump the FW health register when it is trustworthy, to aid
	 * post-mortem debugging of the aborted reset. */
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
2208
2209
/*
 * Kick off a forced firmware reset from the current function.
 *
 * No-op unless the interface is OPEN and no reset is already in flight.
 * The primary function enters RESET_FW (it performs the reset itself,
 * immediately if the co-processor drives recovery); non-primary
 * functions schedule straight to ENABLE_DEV after the normal-function
 * wait period.  Wait values come from the ERROR_RECOVERY_QCFG response
 * and are in deci-seconds.
 */
static void bnxt_force_fw_reset(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 wait_dsecs;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return;
	bnxt_fw_reset_close(bp);
	wait_dsecs = fw_health->master_func_wait_dsecs;
	if (fw_health->primary) {
		/* CO_CPU recovery: the co-processor resets the chip, no
		 * need to wait before entering the reset state. */
		if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
			wait_dsecs = 0;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	} else {
		/* NOTE(review): the timestamp is pushed into the future by
		 * the master wait period so the non-primary timeout budget
		 * starts after the primary has had time to act — confirm
		 * against bnxt_fw_reset_timeout() usage. */
		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
		wait_dsecs = fw_health->normal_func_wait_dsecs;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
	}

	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
}
2233
2234
/*
 * Handle a detected firmware fatal condition by forcing a firmware reset.
 * The FATAL_COND bit is set before taking the lock so the reset task
 * selects the fatal recovery path.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2242
2243
static void __bnxt_fw_recover(struct bnxt_softc *bp)
2244
{
2245
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2246
test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2247
bnxt_fw_reset(bp);
2248
else
2249
bnxt_fw_exception(bp);
2250
}
2251
2252
static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2253
{
2254
struct bnxt_fw_health *fw_health = bp->fw_health;
2255
2256
if (!fw_health)
2257
return;
2258
2259
if (!fw_health->fw_reporter) {
2260
__bnxt_fw_recover(bp);
2261
return;
2262
}
2263
}
2264
2265
/*
 * Slow-path work handler: services deferred firmware events flagged in
 * bp->sp_event (reset notifications and firmware exceptions).
 *
 * IN_SP_TASK is set for the duration so other paths can tell the slow
 * path is running; the barriers order the bit operations against the
 * event processing.  Bails out early if the interface is not OPEN.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	/* Make IN_SP_TASK visible before reading the OPEN state. */
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	/* FW asked for (or announced) a reset. */
	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	/* FW exception event: only report if the FW is actually unwell. */
	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	/* Order event handling before dropping IN_SP_TASK. */
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2291
2292
/* Device setup and teardown */
2293
/*
 * iflib pre-attach: bring the device far enough up to size the iflib
 * context (rings, capabilities, MSI-X) before iflib allocates queues.
 *
 * Sequence: identify NPAR/VF variants, map PCI BARs, create the
 * LinuxKPI pdev shim, initialize HWRM, query FW version, reset the
 * function, detect chip generation (P5/P7), set doorbell ops, query
 * queue/function config, register with FW, then populate scctx and
 * allocate the default completion ring and sysctl trees.
 *
 * Error handling is goto-based, unwinding in reverse order of setup.
 *
 * NOTE(review): several failure paths below use a bare "return rc"
 * instead of jumping to the cleanup labels (asym qportcfg, ctx_mem),
 * which leaks everything set up so far — they should probably be
 * "goto failed".  Flagged inline.
 */
static int
bnxt_attach_pre(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	int rc = 0;

	softc->ctx = ctx;
	softc->dev = iflib_get_dev(ctx);
	softc->media = iflib_get_media(ctx);
	softc->scctx = iflib_get_softc_ctx(ctx);
	softc->sctx = iflib_get_sctx(ctx);
	scctx = softc->scctx;

	/* TODO: Better way of detecting NPAR/VF is needed */
	switch (pci_get_device(softc->dev)) {
	case BCM57402_NPAR:
	case BCM57404_NPAR:
	case BCM57406_NPAR:
	case BCM57407_NPAR:
	case BCM57412_NPAR1:
	case BCM57412_NPAR2:
	case BCM57414_NPAR1:
	case BCM57414_NPAR2:
	case BCM57416_NPAR1:
	case BCM57416_NPAR2:
	case BCM57504_NPAR:
		softc->flags |= BNXT_FLAG_NPAR;
		break;
	case NETXTREME_C_VF1:
	case NETXTREME_C_VF2:
	case NETXTREME_C_VF3:
	case NETXTREME_E_VF1:
	case NETXTREME_E_VF2:
	case NETXTREME_E_VF3:
		softc->flags |= BNXT_FLAG_VF;
		break;
	}

	/* Record PCI topology for this function. */
	softc->domain = pci_get_domain(softc->dev);
	softc->bus = pci_get_bus(softc->dev);
	softc->slot = pci_get_slot(softc->dev);
	softc->function = pci_get_function(softc->dev);
	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);

	/* Track all PF instances in a global list. */
	if (bnxt_num_pfs == 0)
		SLIST_INIT(&pf_list);
	bnxt_num_pfs++;
	softc->list.softc = softc;
	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);

	pci_enable_busmaster(softc->dev);

	if (bnxt_pci_mapping(softc)) {
		device_printf(softc->dev, "PCI mapping failed\n");
		rc = ENXIO;
		goto pci_map_fail;
	}

	/* LinuxKPI pdev shim used by the Linux-derived portions. */
	softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
	if (!softc->pdev) {
		device_printf(softc->dev, "pdev alloc failed\n");
		rc = -ENOMEM;
		goto free_pci_map;
	}

	rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
	if (rc) {
		device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
		goto pci_attach_fail;
	}

	/* HWRM setup/init */
	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
	rc = bnxt_alloc_hwrm_dma_mem(softc);
	if (rc)
		goto dma_fail;

	/* Get firmware version and compare with driver */
	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (softc->ver_info == NULL) {
		rc = ENOMEM;
		device_printf(softc->dev,
		    "Unable to allocate space for version info\n");
		goto ver_alloc_fail;
	}
	/* Default minimum required HWRM version */
	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_ver_get(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm ver get failed\n");
		goto ver_fail;
	}

	/* Now perform a function reset */
	/* NOTE(review): rc of the function reset is not checked here. */
	rc = bnxt_hwrm_func_reset(softc);

	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
		if (rc)
			goto hwrm_short_cmd_alloc_fail;
	}

	/* Chip-generation detection by chip number. */
	if ((softc->ver_info->chip_num == BCM57508) ||
	    (softc->ver_info->chip_num == BCM57504) ||
	    (softc->ver_info->chip_num == BCM57504_NPAR) ||
	    (softc->ver_info->chip_num == BCM57502) ||
	    (softc->ver_info->chip_num == BCM57601) ||
	    (softc->ver_info->chip_num == BCM57602) ||
	    (softc->ver_info->chip_num == BCM57604))
		softc->flags |= BNXT_FLAG_CHIP_P5;

	if (softc->ver_info->chip_num == BCM57608)
		softc->flags |= BNXT_FLAG_CHIP_P7;

	softc->flags |= BNXT_FLAG_TPA;

	/* NOTE(review): the second test below clears TPA on every
	 * P5-plus chip, making the first (rev 0 metal 0 only) test
	 * redundant — presumably a deliberate blanket disable; confirm. */
	if (BNXT_CHIP_P5_PLUS(softc) && (!softc->ver_info->chip_rev) &&
	    (!softc->ver_info->chip_metal))
		softc->flags &= ~BNXT_FLAG_TPA;

	if (BNXT_CHIP_P5_PLUS(softc))
		softc->flags &= ~BNXT_FLAG_TPA;

	/* Get NVRAM info */
	if (BNXT_PF(softc)) {
		/* Shared single-threaded workqueue for all PFs. */
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
			    create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				device_printf(softc->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto nvm_alloc_fail;
			}
		}

		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (softc->nvm_info == NULL) {
			rc = ENOMEM;
			device_printf(softc->dev,
			    "Unable to allocate space for NVRAM info\n");
			goto nvm_alloc_fail;
		}

		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
		    &softc->nvm_info->available_size);
	}

	/* Select the doorbell ops for the chip generation. */
	if (BNXT_CHIP_P5(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
	} else if (BNXT_CHIP_P7(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor2_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor2_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor2_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor2_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor2_db_nq;
	} else {
		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
	}


	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
		goto failed;
	}
	if (softc->is_asym_q) {
		rc = bnxt_hwrm_queue_qportcfg(softc,
		    HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(softc->dev, "attach: hwrm qportcfg (rx) failed\n");
			/* NOTE(review): should be "goto failed" — this
			 * bare return skips all cleanup (leak). */
			return rc;
		}
		bnxt_verify_asym_queues(softc);
	} else {
		softc->rx_max_q = softc->tx_max_q;
		memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
		memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
	}

	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(softc);
	if (rc)
		goto failed;

	/*
	 * Register the driver with the FW
	 * Register the async events with the FW
	 */
	rc = bnxt_drv_rgtr(softc);
	if (rc)
		goto failed;

	if (softc->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(softc);
		if (rc) {
			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
			/* NOTE(review): should be "goto failed" — this
			 * bare return skips all cleanup (leak). */
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
		if (!rc)
			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	iflib_set_mac(ctx, softc->func.mac_addr);

	scctx->isc_txrx = &bnxt_txrx;
	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
	scctx->isc_capabilities = scctx->isc_capenable =
	    /* These are translated to hwassit bits */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
	    /* These are checked by iflib */
	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
	    /* These are part of the iflib mask */
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
	    /* These likely get lost... */
	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;

	if (bnxt_wol_supported(softc))
		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
	bnxt_get_wol_settings(softc);
	if (softc->wol)
		scctx->isc_capenable |= IFCAP_WOL_MAGIC;

	/* Get the queue config */
	/* NOTE(review): duplicate call — WOL settings were fetched a few
	 * lines above; the comment above also looks stale. */
	bnxt_get_wol_settings(softc);
	if (BNXT_CHIP_P5_PLUS(softc))
		bnxt_hwrm_reserve_pf_rings(softc);
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	bnxt_clear_ids(softc);
	/* NOTE(review): dead check — rc is necessarily 0 here. */
	if (rc)
		goto failed;

	/* Now set up iflib sc */
	/* NOTE(review): trailing comma is the comma operator, not a typo
	 * in effect — but it should be a semicolon for clarity. */
	scctx->isc_tx_nsegments = 31,
	scctx->isc_tx_tso_segments_max = 31;
	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
	scctx->isc_vectors = softc->func.max_cp_rings;
	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
	scctx->isc_txrx = &bnxt_txrx;

	if (scctx->isc_nrxd[0] <
	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
		device_printf(softc->dev,
		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n",
		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
		device_printf(softc->dev,
		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). Driver may be unstable\n",
		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
	/* Ring memory sizes: [0] completion, [1] descriptor, [2] agg. */
	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
	    scctx->isc_ntxd[1];
	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
	    scctx->isc_nrxd[1];
	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
	    scctx->isc_nrxd[2];

	/* Cap queue-set counts by MSI-X vectors and FW-allocated rings;
	 * one completion ring/vector is reserved for the default ring. */
	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
	    softc->fn_qcfg.alloc_completion_rings - 1);
	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
	    softc->fn_qcfg.alloc_rx_rings);
	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
	    softc->fn_qcfg.alloc_vnics);
	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);

	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;

	/* iflib will map and release this bar */
	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);

	/*
	 * Default settings for HW LRO (TPA):
	 * Disable HW LRO by default
	 * Can be enabled after taking care of 'packet forwarding'
	 */
	if (softc->flags & BNXT_FLAG_TPA) {
		softc->hw_lro.enable = 0;
		softc->hw_lro.is_mode_gro = 0;
		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
		softc->hw_lro.min_agg_len = 512;
	}

	/* Allocate the default completion ring */
	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.softc = softc;
	softc->def_cp_ring.ring.id = 0;
	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
	    softc->legacy_db_size : softc->def_cp_ring.ring.id * 0x80;
	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
	    sizeof(struct cmpl_base);
	softc->def_cp_ring.ring.db_ring_mask = softc->def_cp_ring.ring.ring_size -1 ;
	/* NOTE(review): rc of this DMA allocation is not checked before
	 * the vaddr/paddr are consumed below. */
	rc = iflib_dma_alloc(ctx,
	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
	    &softc->def_cp_ring_mem, 0);
	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
	iflib_config_task_init(ctx, &softc->def_cp_task, bnxt_def_cp_task);

	rc = bnxt_init_sysctl_ctx(softc);
	if (rc)
		goto init_sysctl_failed;
	if (BNXT_PF(softc)) {
		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
		if (rc)
			goto failed;
	}

	/* Random RSS key; hash on IPv4/IPv6 and their TCP/UDP flows. */
	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
	softc->vnic_info.rss_hash_type =
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
	rc = bnxt_create_config_sysctls_pre(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_hw_lro_sysctls(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_pause_fc_sysctls(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_dcb_sysctls(softc);
	if (rc)
		goto failed;

	set_bit(BNXT_STATE_OPEN, &softc->state);
	INIT_WORK(&softc->sp_task, bnxt_sp_task);
	INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);

	/* Initialize the vlan list */
	SLIST_INIT(&softc->vnic_info.vlan_tags);
	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
	    M_WAITOK|M_ZERO);

	return (rc);

/* Unwind in reverse order of setup. */
failed:
	bnxt_free_sysctl_ctx(softc);
init_sysctl_failed:
	bnxt_hwrm_func_drv_unrgtr(softc, false);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);
nvm_alloc_fail:
	bnxt_free_hwrm_short_cmd_req(softc);
hwrm_short_cmd_alloc_fail:
ver_fail:
	free(softc->ver_info, M_DEVBUF);
ver_alloc_fail:
	bnxt_free_hwrm_dma_mem(softc);
dma_fail:
	BNXT_HWRM_LOCK_DESTROY(softc);
	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
pci_attach_fail:
	kfree(softc->pdev);
	softc->pdev = NULL;
free_pci_map:
	bnxt_pci_mapping_free(softc);
pci_map_fail:
	pci_disable_busmaster(softc->dev);
	return (rc);
}
2700
2701
/*
 * iflib post-attach: finish setup that requires the ifnet to exist —
 * post-config sysctls, PHY probe, media types, frame sizing, DCB, and
 * the RDMA auxiliary device.
 *
 * NOTE(review): the success path deliberately falls through to the
 * "failed:" label (rc == 0 there), so the label name is misleading but
 * behavior is correct.
 */
static int
bnxt_attach_post(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int rc;

	softc->ifp = ifp;
	bnxt_create_config_sysctls_post(softc);

	/* Update link state etc... */
	rc = bnxt_probe_phy(softc);
	if (rc)
		goto failed;

	/* Needs to be done after probing the phy */
	bnxt_create_ver_sysctls(softc);
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);

	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
	    ETHER_CRC_LEN;

	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
	bnxt_dcb_init(softc);
	bnxt_rdma_aux_device_init(softc);

failed:
	return rc;
}
2732
2733
/*
 * iflib detach: tear down the device in roughly reverse attach order.
 *
 * Stops deferred work first (so no task races with the teardown), then
 * unlinks from the PF list, quiesces the hardware via function reset,
 * frees IRQs/DMA/sysctls/HWRM state, and finally releases the PCI
 * mapping.  The shared PF workqueue is destroyed when the last PF goes.
 * Always returns 0.
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	bnxt_rdma_aux_device_uninit(softc);
	/* Quiesce deferred work before dismantling anything it touches. */
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	/* We need to free() these here... */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		/* P5+ parts use NQ interrupts; older parts use RX CP. */
		if (BNXT_CHIP_P5_PLUS(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Last PF out destroys the shared workqueue. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2792
2793
/*
 * Release all HWRM-allocated datapath resources for this function:
 * the default completion ring, then per-TX-queue rings/stat contexts,
 * the L2 filter, VNIC and its RSS context, and finally per-RX-queue
 * ring groups, rings, NQ rings (P5+) and stat contexts.
 *
 * Freeing order matters to the firmware (dependent objects first).
 * Any failure aborts the remainder of the teardown silently — errors
 * are not reported to the caller (void return).
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	rc = bnxt_hwrm_ring_free(softc,
	    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &softc->def_cp_ring.ring,
	    (uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    &softc->tx_rings[i],
		    softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->tx_cp_rings[i].ring,
		    (uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	for (i = 0; i < softc->nrxqsets; i++) {
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
		    &softc->ag_rings[i],
		    (uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
		    &softc->rx_rings[i],
		    softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->rx_cp_rings[i].ring,
		    (uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		/* Notification queues only exist on P5+ chips. */
		if (BNXT_CHIP_P5_PLUS(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
			    HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
			    &softc->nq_rings[i].ring,
			    (uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
2879
2880
2881
/*
 * Reset this function's hardware state.
 *
 * Pre-P5 chips support the HWRM function-level reset directly; P5 and
 * newer instead free every HWRM datapath resource individually via
 * bnxt_hwrm_resource_free().
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5_PLUS(softc))
		bnxt_hwrm_resource_free(softc);
	else
		bnxt_hwrm_func_reset(softc);
}
2893
2894
/*
 * Fill the VNIC's RSS indirection table, spreading hash buckets
 * round-robin across the RX queue sets.
 *
 * On P5+ chips each bucket consumes two 16-bit entries (RX ring id
 * followed by its completion ring id), so i advances by 2 per iteration
 * (the i++ in the body plus the loop's i++); older chips store one ring
 * group id per entry.
 *
 * NOTE(review): the P5+ branch assumes HW_HASH_INDEX_SIZE is even —
 * otherwise the final rgt[i] write would land one past the table.
 * TODO confirm against the definition of HW_HASH_INDEX_SIZE.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
    uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
    int i, j;

    for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
        if (BNXT_CHIP_P5_PLUS(softc)) {
            rgt[i++] = htole16(softc->rx_rings[j].phys_id);
            rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
        } else {
            rgt[i] = htole16(softc->grp_info[j].grp_id);
        }
        /* Wrap back to the first RX queue set. */
        if (++j == softc->nrxqsets)
            j = 0;
    }
}
2911
2912
static void bnxt_get_port_module_status(struct bnxt_softc *softc)
2913
{
2914
struct bnxt_link_info *link_info = &softc->link_info;
2915
struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
2916
uint8_t module_status;
2917
2918
if (bnxt_update_link(softc, false))
2919
return;
2920
2921
module_status = link_info->module_status;
2922
switch (module_status) {
2923
case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
2924
case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
2925
case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
2926
device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
2927
softc->pf.port_id);
2928
if (softc->hwrm_spec_code >= 0x10201) {
2929
device_printf(softc->dev, "Module part number %s\n",
2930
resp->phy_vendor_partnumber);
2931
}
2932
if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
2933
device_printf(softc->dev, "TX is disabled\n");
2934
if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
2935
device_printf(softc->dev, "SFP+ module is shutdown\n");
2936
}
2937
}
2938
2939
static void bnxt_aux_dev_free(struct bnxt_softc *softc)
2940
{
2941
kfree(softc->aux_dev);
2942
softc->aux_dev = NULL;
2943
}
2944
2945
static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
2946
{
2947
struct bnxt_aux_dev *bnxt_adev;
2948
2949
msleep(1000 * 2);
2950
bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
2951
if (!bnxt_adev)
2952
return ERR_PTR(-ENOMEM);
2953
2954
return bnxt_adev;
2955
}
2956
2957
/*
 * Tear down the ROCE auxiliary device: unregister it from the aux bus,
 * release its ida-allocated id (id < 0 means "never allocated"), and
 * free the descriptor.  A no-op when ROCE is not capable or init never
 * produced a valid descriptor.
 */
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
{
    struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;

    /* Skip if no auxiliary device init was done. */
    if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
        return;

    if (IS_ERR_OR_NULL(bnxt_adev))
        return;

    bnxt_rdma_aux_device_del(softc);

    if (bnxt_adev->id >= 0)
        ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);

    bnxt_aux_dev_free(softc);
}
2975
2976
static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
2977
{
2978
int rc;
2979
2980
if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2981
return;
2982
2983
softc->aux_dev = bnxt_aux_dev_init(softc);
2984
if (IS_ERR_OR_NULL(softc->aux_dev)) {
2985
device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
2986
goto skip_aux_init;
2987
}
2988
2989
softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
2990
if (softc->aux_dev->id < 0) {
2991
device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
2992
bnxt_aux_dev_free(softc);
2993
goto skip_aux_init;
2994
}
2995
2996
msleep(1000 * 2);
2997
/* If aux bus init fails, continue with netdev init. */
2998
rc = bnxt_rdma_aux_device_add(softc);
2999
if (rc) {
3000
device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
3001
msleep(1000 * 2);
3002
ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
3003
}
3004
device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
3005
__func__, __LINE__, softc->aux_dev->id);
3006
skip_aux_init:
3007
return;
3008
}
3009
3010
/* Device configuration */
3011
/*
 * iflib init handler: bring the device from reset to a fully configured
 * state.  The sequence is order-critical: firmware reset/stop, then per-RX
 * queue resources (stats ctx, NQ on P5+, CP ring, RX ring, AG ring, ring
 * group), then async CR, VNIC + RSS + TPA configuration, then per-TX queue
 * resources, and finally interrupts/media/RX mask.  Any failure unwinds
 * via a full function reset.
 */
static void
bnxt_init(if_ctx_t ctx)
{
    struct bnxt_softc *softc = iflib_get_softc(ctx);
    struct ifmediareq ifmr;
    int i;
    int rc;

    /* Pre-P5 chips support a firmware function reset; P5+ instead
     * tears down any previous instance via bnxt_stop(). */
    if (!BNXT_CHIP_P5_PLUS(softc)) {
        rc = bnxt_hwrm_func_reset(softc);
        if (rc)
            return;
    } else if (softc->is_dev_init) {
        bnxt_stop(ctx);
    }

    softc->is_dev_init = true;
    bnxt_clear_ids(softc);

    /* P5+ chips use per-queue NQs instead of a default CP ring. */
    if (BNXT_CHIP_P5_PLUS(softc))
        goto skip_def_cp_ring;
    /* Allocate the default completion ring */
    softc->def_cp_ring.cons = UINT32_MAX;
    softc->def_cp_ring.v_bit = 1;
    bnxt_mark_cpr_invalid(&softc->def_cp_ring);
    rc = bnxt_hwrm_ring_alloc(softc,
        HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
        &softc->def_cp_ring.ring);
    bnxt_set_db_mask(softc, &softc->def_cp_ring.ring,
        HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
    if (rc)
        goto fail;
skip_def_cp_ring:
    for (i = 0; i < softc->nrxqsets; i++) {
        /* Allocate the statistics context */
        rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
            softc->rx_stats[i].idi_paddr);
        if (rc)
            goto fail;

        if (BNXT_CHIP_P5_PLUS(softc)) {
            /* Allocate the NQ */
            softc->nq_rings[i].cons = 0;
            softc->nq_rings[i].raw_cons = 0;
            softc->nq_rings[i].v_bit = 1;
            softc->nq_rings[i].last_idx = UINT32_MAX;
            bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
            rc = bnxt_hwrm_ring_alloc(softc,
                HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
                &softc->nq_rings[i].ring);
            bnxt_set_db_mask(softc, &softc->nq_rings[i].ring,
                HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ);
            if (rc)
                goto fail;

            /* Arm the NQ doorbell immediately. */
            softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
        }
        /* Allocate the completion ring */
        softc->rx_cp_rings[i].cons = UINT32_MAX;
        softc->rx_cp_rings[i].raw_cons = UINT32_MAX;
        softc->rx_cp_rings[i].v_bit = 1;
        softc->rx_cp_rings[i].last_idx = UINT32_MAX;
        softc->rx_cp_rings[i].toggle = 0;
        bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
        rc = bnxt_hwrm_ring_alloc(softc,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
            &softc->rx_cp_rings[i].ring);
        bnxt_set_db_mask(softc, &softc->rx_cp_rings[i].ring,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
        if (rc)
            goto fail;

        if (BNXT_CHIP_P5_PLUS(softc))
            softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

        /* Allocate the RX ring */
        rc = bnxt_hwrm_ring_alloc(softc,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
        bnxt_set_db_mask(softc, &softc->rx_rings[i],
            HWRM_RING_ALLOC_INPUT_RING_TYPE_RX);
        if (rc)
            goto fail;
        softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

        /* Allocate the AG ring */
        rc = bnxt_hwrm_ring_alloc(softc,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
            &softc->ag_rings[i]);
        bnxt_set_db_mask(softc, &softc->ag_rings[i],
            HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG);
        if (rc)
            goto fail;
        softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

        /* Allocate the ring group */
        softc->grp_info[i].stats_ctx =
            softc->rx_cp_rings[i].stats_ctx_id;
        softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
        softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
        softc->grp_info[i].cp_ring_id =
            softc->rx_cp_rings[i].ring.phys_id;
        rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
        if (rc)
            goto fail;
    }

    /* And now set the default CP / NQ ring for the async */
    rc = bnxt_cfg_async_cr(softc);
    if (rc)
        goto fail;

    /* Allocate the VNIC RSS context */
    rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
    if (rc)
        goto fail;

    /* Allocate the vnic */
    softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
    softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
    rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
    if (rc)
        goto fail;
    rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
    if (rc)
        goto fail;
    rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
    if (rc)
        goto fail;
    rc = bnxt_hwrm_set_filter(softc);
    if (rc)
        goto fail;

    /* RSS indirection table must exist before bnxt_hwrm_rss_cfg(). */
    bnxt_rss_grp_tbl_init(softc);

    rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
        softc->vnic_info.rss_hash_type);
    if (rc)
        goto fail;

    rc = bnxt_hwrm_vnic_tpa_cfg(softc);
    if (rc)
        goto fail;

    for (i = 0; i < softc->ntxqsets; i++) {
        /* Allocate the statistics context */
        rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
            softc->tx_stats[i].idi_paddr);
        if (rc)
            goto fail;

        /* Allocate the completion ring */
        softc->tx_cp_rings[i].cons = UINT32_MAX;
        softc->tx_cp_rings[i].raw_cons = UINT32_MAX;
        softc->tx_cp_rings[i].v_bit = 1;
        softc->tx_cp_rings[i].toggle = 0;
        bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
        rc = bnxt_hwrm_ring_alloc(softc,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
            &softc->tx_cp_rings[i].ring);
        bnxt_set_db_mask(softc, &softc->tx_cp_rings[i].ring,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
        if (rc)
            goto fail;

        if (BNXT_CHIP_P5_PLUS(softc))
            softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

        /* Allocate the TX ring */
        rc = bnxt_hwrm_ring_alloc(softc,
            HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
            &softc->tx_rings[i]);
        bnxt_set_db_mask(softc, &softc->tx_rings[i],
            HWRM_RING_ALLOC_INPUT_RING_TYPE_TX);
        if (rc)
            goto fail;
        softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
    }

    bnxt_do_enable_intr(&softc->def_cp_ring);
    bnxt_get_port_module_status(softc);
    bnxt_media_status(softc->ctx, &ifmr);
    bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
    return;

fail:
    /* Unwind everything allocated so far via a full reset. */
    bnxt_func_reset(softc);
    bnxt_clear_ids(softc);
    return;
}
3200
3201
static void
3202
bnxt_stop(if_ctx_t ctx)
3203
{
3204
struct bnxt_softc *softc = iflib_get_softc(ctx);
3205
3206
softc->is_dev_init = false;
3207
bnxt_do_disable_intr(&softc->def_cp_ring);
3208
bnxt_func_reset(softc);
3209
bnxt_clear_ids(softc);
3210
return;
3211
}
3212
3213
static u_int
3214
bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3215
{
3216
uint8_t *mta = arg;
3217
3218
if (cnt == BNXT_MAX_MC_ADDRS)
3219
return (1);
3220
3221
bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3222
3223
return (1);
3224
}
3225
3226
static void
3227
bnxt_multi_set(if_ctx_t ctx)
3228
{
3229
struct bnxt_softc *softc = iflib_get_softc(ctx);
3230
if_t ifp = iflib_get_ifp(ctx);
3231
uint8_t *mta;
3232
int mcnt;
3233
3234
mta = softc->vnic_info.mc_list.idi_vaddr;
3235
bzero(mta, softc->vnic_info.mc_list.idi_size);
3236
mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);
3237
3238
if (mcnt > BNXT_MAX_MC_ADDRS) {
3239
softc->vnic_info.rx_mask |=
3240
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3241
bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3242
} else {
3243
softc->vnic_info.rx_mask &=
3244
~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3245
bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
3246
softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
3247
softc->vnic_info.mc_list_count = mcnt;
3248
softc->vnic_info.rx_mask |=
3249
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
3250
if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
3251
device_printf(softc->dev,
3252
"set_multi: rx_mask set failed\n");
3253
}
3254
}
3255
3256
static int
3257
bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
3258
{
3259
struct bnxt_softc *softc = iflib_get_softc(ctx);
3260
3261
if (mtu > BNXT_MAX_MTU)
3262
return EINVAL;
3263
3264
softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3265
softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3266
return 0;
3267
}
3268
3269
/*
 * iflib media-status handler: refresh link state from firmware and
 * translate it into ifmediareq flags (link valid/active, duplex,
 * active media word, RX/TX pause).  The active media word is found by
 * matching baudrate against the entries registered by
 * bnxt_add_media_types().
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
    struct bnxt_softc *softc = iflib_get_softc(ctx);
    struct bnxt_link_info *link_info = &softc->link_info;
    struct ifmedia_entry *next;
    uint64_t target_baudrate = bnxt_get_baudrate(link_info);
    int active_media = IFM_UNKNOWN;

    bnxt_update_link(softc, true);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (link_info->link_up)
        ifmr->ifm_status |= IFM_ACTIVE;
    else
        ifmr->ifm_status &= ~IFM_ACTIVE;

    if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
        ifmr->ifm_active |= IFM_FDX;
    else
        ifmr->ifm_active |= IFM_HDX;

    /*
     * Go through the list of supported media which got prepared
     * as part of bnxt_add_media_types() using api ifmedia_add().
     */
    LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
        if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
            active_media = next->ifm_media;
            break;
        }
    }
    ifmr->ifm_active |= active_media;

    if (link_info->flow_ctrl.rx)
        ifmr->ifm_active |= IFM_ETH_RXPAUSE;
    if (link_info->flow_ctrl.tx)
        ifmr->ifm_active |= IFM_ETH_TXPAUSE;

    bnxt_report_link(softc);
    return;
}
3313
3314
/*
 * iflib media-change handler: translate the requested IFM_* media word
 * into a forced firmware link speed (clearing autoneg) or into an
 * autoneg advertisement mask, then push the new link settings.
 *
 * For most forced speeds the support mask is consulted first: legacy
 * NRZ speeds come from support_speeds, newer P7 "speeds2" from
 * support_speeds2, PAM4 speeds from support_pam4_speeds; the matching
 * force_* flag tells the HWRM layer which signalling set was chosen.
 */
static int
bnxt_media_change(if_ctx_t ctx)
{
    struct bnxt_softc *softc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);
    struct ifmediareq ifmr;
    int rc;
    struct bnxt_link_info *link_info = &softc->link_info;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return EINVAL;

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_100_T:
        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
        link_info->req_link_speed =
            HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
        break;
    case IFM_1000_KX:
    case IFM_1000_SGMII:
    case IFM_1000_CX:
    case IFM_1000_SX:
    case IFM_1000_LX:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB;
            link_info->force_speed2_nrz = true;
        }

        break;

    case IFM_2500_KX:
    case IFM_2500_T:
        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
        link_info->req_link_speed =
            HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
        break;
    case IFM_10G_CR1:
    case IFM_10G_KR:
    case IFM_10G_LR:
    case IFM_10G_SR:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        /*
         * NOTE(review): these tests mask support_speeds/support_speeds2
         * with FORCE_LINK_SPEED* values rather than the SUPPORT_SPEEDS*
         * bit constants used by the 1G/50G/100G cases above/below —
         * verify the constants coincide for 10G (same for the 25G and
         * 40G cases further down).
         */
        if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB;
            link_info->force_speed2_nrz = true;
        }

        break;
    case IFM_20G_KR2:
        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
        link_info->req_link_speed =
            HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
        break;
    case IFM_25G_CR:
    case IFM_25G_KR:
    case IFM_25G_SR:
    case IFM_25G_LR:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        /* NOTE(review): see the 10G case about the mask constants. */
        if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB;
            link_info->force_speed2_nrz = true;
        }

        break;

    case IFM_40G_CR4:
    case IFM_40G_KR4:
    case IFM_40G_LR4:
    case IFM_40G_SR4:
    case IFM_40G_XLAUI:
    case IFM_40G_XLAUI_AC:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        /* NOTE(review): see the 10G case about the mask constants. */
        if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB;
            link_info->force_speed2_nrz = true;
        }

        break;

    case IFM_50G_CR2:
    case IFM_50G_KR2:
    case IFM_50G_KR4:
    case IFM_50G_SR2:
    case IFM_50G_LR2:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB;
            link_info->force_speed2_nrz = true;
        }

        break;

    case IFM_50G_CP:
    case IFM_50G_LR:
    case IFM_50G_SR:
    case IFM_50G_KR_PAM4:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
            link_info->force_pam4_speed = true;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56;
            link_info->force_pam4_56_speed2 = true;
        }

        break;

    case IFM_100G_CR4:
    case IFM_100G_KR4:
    case IFM_100G_LR4:
    case IFM_100G_SR4:
    case IFM_100G_AUI4:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB;
            link_info->force_speed2_nrz = true;
        }

        break;

    case IFM_100G_CP2:
    case IFM_100G_SR2:
    case IFM_100G_KR2_PAM4:
    case IFM_100G_AUI2:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
            link_info->force_pam4_speed = true;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56;
            link_info->force_pam4_56_speed2 = true;
        }

        break;

    case IFM_100G_KR_PAM4:
    case IFM_100G_CR_PAM4:
    case IFM_100G_DR:
    case IFM_100G_AUI2_AC:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112;
            link_info->force_pam4_112_speed2 = true;
        }

        break;

    case IFM_200G_SR4:
    case IFM_200G_FR4:
    case IFM_200G_LR4:
    case IFM_200G_DR4:
    case IFM_200G_CR4_PAM4:
    case IFM_200G_KR4_PAM4:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
            link_info->force_pam4_speed = true;

        } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56;
            link_info->force_pam4_56_speed2 = true;
        }

        break;

    case IFM_200G_AUI4:

        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112;
            link_info->force_pam4_112_speed2 = true;
        }

        break;

    case IFM_400G_FR8:
    case IFM_400G_LR8:
    case IFM_400G_AUI8:
        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56;
            link_info->force_pam4_56_speed2 = true;
        }

        break;

    case IFM_400G_AUI8_AC:
    case IFM_400G_DR4:
        link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

        if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112) {
            link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112;
            link_info->force_pam4_112_speed2 = true;
        }

        break;

    case IFM_1000_T:
        /* Base-T speeds autonegotiate rather than force. */
        link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        link_info->autoneg |= BNXT_AUTONEG_SPEED;
        break;
    case IFM_10G_T:
        link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        link_info->autoneg |= BNXT_AUTONEG_SPEED;
        break;
    default:
        device_printf(softc->dev,
            "Unsupported media type! Using auto\n");
        /* Fall-through */
    case IFM_AUTO:
        // Auto
        link_info->autoneg |= BNXT_AUTONEG_SPEED;
        break;
    }

    rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
    bnxt_media_status(softc->ctx, &ifmr);
    return rc;
}
3575
3576
static int
3577
bnxt_promisc_set(if_ctx_t ctx, int flags)
3578
{
3579
struct bnxt_softc *softc = iflib_get_softc(ctx);
3580
if_t ifp = iflib_get_ifp(ctx);
3581
int rc;
3582
3583
if (if_getflags(ifp) & IFF_ALLMULTI ||
3584
if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
3585
softc->vnic_info.rx_mask |=
3586
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3587
else
3588
softc->vnic_info.rx_mask &=
3589
~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3590
3591
if (if_getflags(ifp) & IFF_PROMISC)
3592
softc->vnic_info.rx_mask |=
3593
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3594
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
3595
else
3596
softc->vnic_info.rx_mask &=
3597
~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
3598
3599
rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3600
3601
return rc;
3602
}
3603
3604
static uint64_t
3605
bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3606
{
3607
if_t ifp = iflib_get_ifp(ctx);
3608
3609
if (cnt < IFCOUNTERS)
3610
return if_get_counter_default(ifp, cnt);
3611
3612
return 0;
3613
}
3614
3615
static void
3616
bnxt_update_admin_status(if_ctx_t ctx)
3617
{
3618
struct bnxt_softc *softc = iflib_get_softc(ctx);
3619
3620
/*
3621
* When SR-IOV is enabled, avoid each VF sending this HWRM
3622
* request every sec with which firmware timeouts can happen
3623
*/
3624
if (!BNXT_PF(softc))
3625
return;
3626
3627
bnxt_hwrm_port_qstats(softc);
3628
3629
if (BNXT_CHIP_P5_PLUS(softc) &&
3630
(softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
3631
bnxt_hwrm_port_qstats_ext(softc);
3632
3633
if (BNXT_CHIP_P5_PLUS(softc)) {
3634
struct ifmediareq ifmr;
3635
3636
if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
3637
bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
3638
bnxt_media_status(softc->ctx, &ifmr);
3639
}
3640
}
3641
3642
return;
3643
}
3644
3645
static void
3646
bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3647
{
3648
3649
struct bnxt_softc *softc = iflib_get_softc(ctx);
3650
uint64_t ticks_now = ticks;
3651
3652
/* Schedule bnxt_update_admin_status() once per sec */
3653
if (ticks_now - softc->admin_ticks >= hz) {
3654
softc->admin_ticks = ticks_now;
3655
iflib_admin_intr_deferred(ctx);
3656
}
3657
3658
return;
3659
}
3660
3661
static void inline
3662
bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
3663
{
3664
struct bnxt_softc *softc = cpr->ring.softc;
3665
3666
3667
if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3668
return;
3669
3670
if (BNXT_CHIP_P5_PLUS(softc))
3671
softc->db_ops.bnxt_db_nq(cpr, 1);
3672
else
3673
softc->db_ops.bnxt_db_rx_cq(cpr, 1);
3674
}
3675
3676
static void inline
3677
bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
3678
{
3679
struct bnxt_softc *softc = cpr->ring.softc;
3680
3681
if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3682
return;
3683
3684
if (BNXT_CHIP_P5_PLUS(softc))
3685
softc->db_ops.bnxt_db_nq(cpr, 0);
3686
else
3687
softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3688
}
3689
3690
/* Enable all interrupts */
3691
static void
3692
bnxt_intr_enable(if_ctx_t ctx)
3693
{
3694
struct bnxt_softc *softc = iflib_get_softc(ctx);
3695
int i;
3696
3697
bnxt_do_enable_intr(&softc->def_cp_ring);
3698
for (i = 0; i < softc->nrxqsets; i++)
3699
if (BNXT_CHIP_P5_PLUS(softc))
3700
softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3701
else
3702
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3703
3704
return;
3705
}
3706
3707
/* Enable interrupt for a single queue */
3708
static int
3709
bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3710
{
3711
struct bnxt_softc *softc = iflib_get_softc(ctx);
3712
3713
if (BNXT_CHIP_P5_PLUS(softc))
3714
softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3715
else
3716
softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
3717
3718
return 0;
3719
}
3720
3721
static void
3722
bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3723
{
3724
device_printf(softc->dev, "cmd sequence number %d\n",
3725
cmd_cmpl->sequence_id);
3726
return;
3727
}
3728
3729
static void
3730
bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
3731
{
3732
struct bnxt_softc *softc = cpr->ring.softc;
3733
uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
3734
3735
switch (type) {
3736
case HWRM_CMPL_TYPE_HWRM_DONE:
3737
bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
3738
break;
3739
case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
3740
bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
3741
break;
3742
default:
3743
device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
3744
__FUNCTION__, __LINE__, type);
3745
break;
3746
}
3747
}
3748
3749
/*
 * Drain the notification queue (NQ) for queue set nqid (P5+ chips).
 *
 * Entries are walked until the valid bit no longer matches the ring's
 * current epoch.  CQ-notification entries record the hardware toggle
 * value into both the TX and RX completion rings of this queue set so
 * the next CQ doorbell carries the epoch the hardware expects; every
 * other entry type is an async/HWRM message and is dispatched to
 * bnxt_process_async_msg().  Consumer state is written back only if at
 * least one entry was consumed.
 */
void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
    struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
    nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
    struct bnxt_cp_ring *tx_cpr = &softc->tx_cp_rings[nqid];
    struct bnxt_cp_ring *rx_cpr = &softc->rx_cp_rings[nqid];
    bool v_bit = cpr->v_bit;
    uint32_t cons = cpr->cons;
    uint32_t raw_cons = cpr->raw_cons;
    uint16_t nq_type, nqe_cnt = 0;

    while (1) {
        /* Stop at the first entry hardware has not published yet. */
        if (!NQ_VALID(&cmp[cons], v_bit)) {
            goto done;
        }

        nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

        if (NQE_CN_TYPE(nq_type) != NQ_CN_TYPE_CQ_NOTIFICATION) {
            bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
        } else {
            /* Same toggle feeds both CP rings of this queue set. */
            tx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
            rx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
        }

        /* Advance consumer; v_bit flips when the ring wraps. */
        NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
        raw_cons++;
        nqe_cnt++;
    }
done:
    if (nqe_cnt) {
        cpr->cons = cons;
        cpr->raw_cons = raw_cons;
        cpr->v_bit = v_bit;
    }
}
3786
3787
static int
3788
bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3789
{
3790
struct bnxt_softc *softc = iflib_get_softc(ctx);
3791
3792
if (BNXT_CHIP_P5_PLUS(softc)) {
3793
process_nq(softc, qid);
3794
softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3795
}
3796
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3797
return 0;
3798
}
3799
3800
/* Disable all interrupts */
3801
static void
3802
bnxt_disable_intr(if_ctx_t ctx)
3803
{
3804
struct bnxt_softc *softc = iflib_get_softc(ctx);
3805
int i;
3806
3807
/*
3808
* NOTE: These TX interrupts should never get enabled, so don't
3809
* update the index
3810
*/
3811
for (i = 0; i < softc->nrxqsets; i++)
3812
if (BNXT_CHIP_P5_PLUS(softc))
3813
softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3814
else
3815
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3816
3817
3818
return;
3819
}
3820
3821
static int
3822
bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
3823
{
3824
struct bnxt_softc *softc = iflib_get_softc(ctx);
3825
struct bnxt_cp_ring *ring;
3826
struct if_irq *irq;
3827
uint16_t id;
3828
int rc;
3829
int i;
3830
char irq_name[16];
3831
3832
if (BNXT_CHIP_P5_PLUS(softc))
3833
goto skip_default_cp;
3834
3835
rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
3836
softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
3837
bnxt_handle_def_cp, softc, 0, "def_cp");
3838
if (rc) {
3839
device_printf(iflib_get_dev(ctx),
3840
"Failed to register default completion ring handler\n");
3841
return rc;
3842
}
3843
3844
skip_default_cp:
3845
for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
3846
if (BNXT_CHIP_P5_PLUS(softc)) {
3847
irq = &softc->nq_rings[i].irq;
3848
id = softc->nq_rings[i].ring.id;
3849
ring = &softc->nq_rings[i];
3850
} else {
3851
irq = &softc->rx_cp_rings[i].irq;
3852
id = softc->rx_cp_rings[i].ring.id ;
3853
ring = &softc->rx_cp_rings[i];
3854
}
3855
snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
3856
rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
3857
bnxt_handle_isr, ring, i, irq_name);
3858
if (rc) {
3859
device_printf(iflib_get_dev(ctx),
3860
"Failed to register RX completion ring handler\n");
3861
i--;
3862
goto fail;
3863
}
3864
}
3865
3866
for (i=0; i<softc->scctx->isc_ntxqsets; i++)
3867
iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
3868
3869
return rc;
3870
3871
fail:
3872
for (; i>=0; i--)
3873
iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
3874
iflib_irq_free(ctx, &softc->def_cp_ring.irq);
3875
return rc;
3876
}
3877
3878
/*
3879
* We're explicitly allowing duplicates here. They will need to be
3880
* removed as many times as they are added.
3881
*/
3882
static void
3883
bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
3884
{
3885
struct bnxt_softc *softc = iflib_get_softc(ctx);
3886
struct bnxt_vlan_tag *new_tag;
3887
3888
new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
3889
if (new_tag == NULL)
3890
return;
3891
new_tag->tag = vtag;
3892
new_tag->filter_id = -1;
3893
SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
3894
};
3895
3896
static void
3897
bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
3898
{
3899
struct bnxt_softc *softc = iflib_get_softc(ctx);
3900
struct bnxt_vlan_tag *vlan_tag;
3901
3902
SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
3903
if (vlan_tag->tag == vtag) {
3904
SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
3905
bnxt_vlan_tag, next);
3906
free(vlan_tag, M_DEVBUF);
3907
break;
3908
}
3909
}
3910
}
3911
3912
static int
3913
bnxt_wol_config(if_ctx_t ctx)
3914
{
3915
struct bnxt_softc *softc = iflib_get_softc(ctx);
3916
if_t ifp = iflib_get_ifp(ctx);
3917
3918
if (!softc)
3919
return -EBUSY;
3920
3921
if (!bnxt_wol_supported(softc))
3922
return -ENOTSUP;
3923
3924
if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
3925
if (!softc->wol) {
3926
if (bnxt_hwrm_alloc_wol_fltr(softc))
3927
return -EBUSY;
3928
softc->wol = 1;
3929
}
3930
} else {
3931
if (softc->wol) {
3932
if (bnxt_hwrm_free_wol_fltr(softc))
3933
return -EBUSY;
3934
softc->wol = 0;
3935
}
3936
}
3937
3938
return 0;
3939
}
3940
3941
static bool
3942
bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
3943
{
3944
switch (event) {
3945
case IFLIB_RESTART_VLAN_CONFIG:
3946
default:
3947
return (false);
3948
}
3949
}
3950
3951
static int
3952
bnxt_shutdown(if_ctx_t ctx)
3953
{
3954
bnxt_wol_config(ctx);
3955
return 0;
3956
}
3957
3958
static int
3959
bnxt_suspend(if_ctx_t ctx)
3960
{
3961
bnxt_wol_config(ctx);
3962
return 0;
3963
}
3964
3965
static int
3966
bnxt_resume(if_ctx_t ctx)
3967
{
3968
struct bnxt_softc *softc = iflib_get_softc(ctx);
3969
3970
bnxt_get_wol_settings(softc);
3971
return 0;
3972
}
3973
3974
/*
 * Driver-private ioctl handler (SIOCGPRIVATE_0).
 *
 * The user supplies a struct bnxt_ioctl_data buffer via the ifreq; the
 * handler copies it in, dispatches on hdr.type to the matching HWRM
 * NVM/firmware helper, then copies either the updated structure or just
 * the hdr.rc error code back out.  Requires PRIV_DRIVER privilege.
 *
 * Returns 0 on success, EINVAL on an oversized buffer, ENOTSUP for an
 * unknown command/type, or the error from priv_check/copyin/copyout/the
 * HWRM call.
 *
 * NOTE(review): iol is only checked against an upper bound; a buffer
 * shorter than the ioctl header would leave hdr.type partly
 * uninitialized stack data — presumably callers always pass a full
 * structure, but worth confirming.
 */
static int
bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifreq *ifr = (struct ifreq *)data;
	struct bnxt_ioctl_header *ioh;
	size_t iol;
	int rc = ENOTSUP;
	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;

	switch (command) {
	case SIOCGPRIVATE_0:
		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
			goto exit;

		ioh = ifr_buffer_get_buffer(ifr);
		iol = ifr_buffer_get_length(ifr);
		/* User buffer must fit in the on-stack staging copy. */
		if (iol > sizeof(iod_storage))
			return (EINVAL);

		if ((rc = copyin(ioh, iod, iol)) != 0)
			goto exit;

		switch (iod->hdr.type) {
		/* Look up an NVM directory entry by type/ordinal/ext. */
		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
			    &iod->find;

			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
			    &find->ordinal, find->ext, &find->index,
			    find->use_index, find->search_opt,
			    &find->data_length, &find->item_length,
			    &find->fw_ver);
			if (rc) {
				/* On failure only the rc field goes back. */
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Read an NVM item into the user buffer, in DMA chunks. */
		case BNXT_HWRM_NVM_READ:
		{
			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
			struct iflib_dma_info dma_data;
			size_t offset;
			size_t remain;
			size_t csize;

			/*
			 * Some HWRM versions can't read more than 0x8000 bytes
			 */
			rc = iflib_dma_alloc(softc->ctx,
			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			/* Read 0x8000-byte chunks, bouncing through DMA. */
			for (remain = rd->length, offset = 0;
			    remain && offset < rd->length; offset += 0x8000) {
				csize = min(remain, 0x8000);
				rc = bnxt_hwrm_nvm_read(softc, rd->index,
				    rd->offset + offset, csize, &dma_data);
				if (rc) {
					iod->hdr.rc = rc;
					rc = copyout(&iod->hdr.rc, &ioh->rc,
					    sizeof(ioh->rc));
					break;
				} else {
					rc = copyout(dma_data.idi_vaddr,
					    rd->data + offset, csize);
					iod->hdr.rc = rc;
				}
				remain -= csize;
			}
			if (rc == 0)
				rc = copyout(iod, ioh, iol);

			iflib_dma_free(&dma_data);
			goto exit;
		}
		/* Ask firmware to reset a processor. */
		case BNXT_HWRM_FW_RESET:
		{
			struct bnxt_ioctl_hwrm_fw_reset *rst =
			    &iod->reset;

			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
			    &rst->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Query a firmware processor's status. */
		case BNXT_HWRM_FW_QSTATUS:
		{
			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
			    &iod->status;

			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
			    &qstat->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Write a new NVM item. */
		case BNXT_HWRM_NVM_WRITE:
		{
			struct bnxt_ioctl_hwrm_nvm_write *wr =
			    &iod->write;

			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
			    wr->type, wr->ordinal, wr->ext, wr->attr,
			    wr->option, wr->data_length, wr->keep,
			    &wr->item_length, &wr->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			}
			else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Erase an NVM directory entry by index. */
		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
			    &iod->erase;

			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Fetch NVM directory entry count and entry size. */
		case BNXT_HWRM_NVM_GET_DIR_INFO:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
			    &iod->dir_info;

			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
			    &info->entry_length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Dump the NVM directory into the user buffer via DMA. */
		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
			    &iod->dir_entries;
			struct iflib_dma_info dma_data;

			rc = iflib_dma_alloc(softc->ctx, get->max_size,
			    &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
			    &get->entry_length, &dma_data);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				rc = copyout(dma_data.idi_vaddr, get->data,
				    get->entry_length * get->entries);
				iod->hdr.rc = rc;
				if (rc == 0)
					rc = copyout(iod, ioh, iol);
			}
			iflib_dma_free(&dma_data);

			goto exit;
		}
		/* Validate a staged firmware update item. */
		case BNXT_HWRM_NVM_VERIFY_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
			    &iod->verify;

			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
			    vrfy->ordinal, vrfy->ext);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Apply a staged firmware update. */
		case BNXT_HWRM_NVM_INSTALL_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
			    &iod->install;

			rc = bnxt_hwrm_nvm_install_update(softc,
			    inst->install_type, &inst->installed_items,
			    &inst->result, &inst->problem_item,
			    &inst->reset_required);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Modify part of an existing NVM item in place. */
		case BNXT_HWRM_NVM_MODIFY:
		{
			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;

			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
			    mod->offset, mod->data, true, mod->length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Read the firmware's real-time clock. */
		case BNXT_HWRM_FW_GET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
			    &iod->get_time;

			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
			    &gtm->second, &gtm->millisecond, &gtm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		/* Set the firmware's real-time clock. */
		case BNXT_HWRM_FW_SET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_set_time *stm =
			    &iod->set_time;

			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
			    stm->month, stm->day, stm->hour, stm->minute,
			    stm->second, stm->millisecond, stm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		}
		break;
	}

exit:
	return rc;
}
static int
4278
bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
4279
{
4280
struct bnxt_softc *softc = iflib_get_softc(ctx);
4281
uint8_t *data = i2c->data;
4282
int rc;
4283
4284
/* No point in going further if phy status indicates
4285
* module is not inserted or if it is powered down or
4286
* if it is of type 10GBase-T
4287
*/
4288
if (softc->link_info.module_status >
4289
HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
4290
return -EOPNOTSUPP;
4291
4292
/* This feature is not supported in older firmware versions */
4293
if (!BNXT_CHIP_P5_PLUS(softc) ||
4294
(softc->hwrm_spec_code < 0x10202))
4295
return -EOPNOTSUPP;
4296
4297
4298
rc = bnxt_read_sfp_module_eeprom_info(softc, I2C_DEV_ADDR_A0, 0, 0, 0,
4299
i2c->offset, i2c->len, data);
4300
4301
return rc;
4302
}
4303
4304
/*
4305
* Support functions
4306
*/
4307
static int
4308
bnxt_probe_phy(struct bnxt_softc *softc)
4309
{
4310
struct bnxt_link_info *link_info = &softc->link_info;
4311
int rc = 0;
4312
4313
softc->phy_flags = 0;
4314
rc = bnxt_hwrm_phy_qcaps(softc);
4315
if (rc) {
4316
device_printf(softc->dev,
4317
"Probe phy can't get phy capabilities (rc: %x)\n", rc);
4318
return rc;
4319
}
4320
4321
rc = bnxt_update_link(softc, false);
4322
if (rc) {
4323
device_printf(softc->dev,
4324
"Probe phy can't update link (rc: %x)\n", rc);
4325
return (rc);
4326
}
4327
4328
bnxt_get_port_module_status(softc);
4329
4330
/*initialize the ethool setting copy with NVM settings */
4331
if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
4332
link_info->autoneg |= BNXT_AUTONEG_SPEED;
4333
4334
link_info->req_duplex = link_info->duplex_setting;
4335
4336
/* NRZ link speed */
4337
if (link_info->autoneg & BNXT_AUTONEG_SPEED)
4338
link_info->req_link_speed = link_info->auto_link_speeds;
4339
else
4340
link_info->req_link_speed = link_info->force_link_speed;
4341
4342
/* PAM4 link speed */
4343
if (link_info->auto_pam4_link_speeds)
4344
link_info->req_link_speed = link_info->auto_pam4_link_speeds;
4345
if (link_info->force_pam4_link_speed)
4346
link_info->req_link_speed = link_info->force_pam4_link_speed;
4347
4348
return (rc);
4349
}
4350
4351
/*
 * Register ifmedia entries for one media family (copper, LR/SR/ER
 * optics, backplane, BaseT, ...).
 *
 * Each BNXT_IFMEDIA_ADD(mask, bit, media) conditionally adds the given
 * ifmedia word when the corresponding speed bit is set in the supplied
 * support mask.  Three masks are consulted: the legacy NRZ mask, the
 * PAM4 mask, and the Thor2-era "speeds2" mask; callers pass zero for
 * the masks that do not apply to the active signalling mode.
 */
static void
add_media(struct bnxt_softc *softc, u8 media_type, u16 supported_NRZ_speeds,
    u16 supported_pam4_speeds, u16 supported_speeds2)
{

	switch (media_type) {
	case BNXT_MEDIA_CR:

		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_CP);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_CP2);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);

		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
		/* thor2 nrz*/
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_CP);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_CP2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_CR4_PAM4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_CR_PAM4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);

		break;

	case BNXT_MEDIA_LR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_LR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_LR);
		/* thor2 nrz*/
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_LR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_LR);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_LR8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);

		break;

	case BNXT_MEDIA_SR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_SR);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_SR2);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_SR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SX);
		/* thor2 nrz*/
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_SR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SX);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_SR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_DR4);
		break;

	case BNXT_MEDIA_ER:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_ER4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_AUI4);
		/* thor2 PAM56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_FR8);
		/* thor2 PAM112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
		break;

	case BNXT_MEDIA_KR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_20GB, IFM_20G_KR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
		break;

	case BNXT_MEDIA_AC:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI_AC);
		break;

	case BNXT_MEDIA_BASECX:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
		break;

	case BNXT_MEDIA_BASET:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100MB, IFM_100_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10MB, IFM_10_T);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_T);
		break;

	case BNXT_MEDIA_BASEKX:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_KX);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_KX);
		break;

	case BNXT_MEDIA_BASESGMII:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SGMII);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SGMII);
		break;

	default:
		/* Unknown media family: register nothing. */
		break;

	}
	return;

}
/*
 * Rebuild the interface's ifmedia list from the PHY's advertised
 * capabilities.
 *
 * Maps the HWRM phy_type to one of the driver's media families, then
 * registers the speed entries that match the active signalling mode
 * (NRZ, PAM4-56 or PAM4-112).  IFM_AUTO is always registered; NPAR
 * (partitioned) functions get only IFM_AUTO since the port PHY is
 * shared.
 */
static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	uint16_t supported_NRZ_speeds = 0, supported_pam4_speeds = 0, supported_speeds2 = 0;
	uint8_t phy_type = get_phy_type(softc), media_type;

	supported_NRZ_speeds = link_info->support_speeds;
	supported_speeds2 = link_info->support_speeds2;
	supported_pam4_speeds = link_info->support_pam4_speeds;

	/* Auto is always supported */
	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);

	if (softc->flags & BNXT_FLAG_NPAR)
		return;

	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR4:

		media_type = BNXT_MEDIA_CR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR4:

		media_type = BNXT_MEDIA_LR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR4:

		media_type = BNXT_MEDIA_SR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASEER:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER8:

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4:

		media_type = BNXT_MEDIA_ER;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		media_type = BNXT_MEDIA_KR;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		media_type = BNXT_MEDIA_AC;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
		media_type = BNXT_MEDIA_BASECX;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
		media_type = BNXT_MEDIA_BASET;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		media_type = BNXT_MEDIA_BASEKX;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		media_type = BNXT_MEDIA_BASESGMII;
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
		/* Only Autoneg is supported for TYPE_UNKNOWN */
		break;

	default:
		/* Only Autoneg is supported for new phy type values */
		device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
		break;
	}

	/*
	 * Pick the speed mask matching the active signalling mode; fall
	 * back to the Thor2 speeds2 mask when the legacy mask is empty.
	 */
	switch (link_info->sig_mode) {
	case BNXT_SIG_MODE_NRZ:
		if (supported_NRZ_speeds != 0)
			add_media(softc, media_type, supported_NRZ_speeds, 0, 0);
		else
			add_media(softc, media_type, 0, 0, supported_speeds2);
		break;
	case BNXT_SIG_MODE_PAM4:
		if (supported_pam4_speeds != 0)
			add_media(softc, media_type, 0, supported_pam4_speeds, 0);
		else
			add_media(softc, media_type, 0, 0, supported_speeds2);
		break;
	case BNXT_SIG_MODE_PAM4_112:
		add_media(softc, media_type, 0, 0, supported_speeds2);
		break;
	}

	return;
}
static int
4664
bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4665
{
4666
uint32_t flag;
4667
4668
if (bar->res != NULL) {
4669
device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4670
return EDOOFUS;
4671
}
4672
4673
bar->rid = PCIR_BAR(bar_num);
4674
flag = RF_ACTIVE;
4675
if (shareable)
4676
flag |= RF_SHAREABLE;
4677
4678
if ((bar->res =
4679
bus_alloc_resource_any(softc->dev,
4680
SYS_RES_MEMORY,
4681
&bar->rid,
4682
flag)) == NULL) {
4683
device_printf(softc->dev,
4684
"PCI BAR%d mapping failure\n", bar_num);
4685
return (ENXIO);
4686
}
4687
bar->tag = rman_get_bustag(bar->res);
4688
bar->handle = rman_get_bushandle(bar->res);
4689
bar->size = rman_get_size(bar->res);
4690
4691
return 0;
4692
}
4693
4694
static int
4695
bnxt_pci_mapping(struct bnxt_softc *softc)
4696
{
4697
int rc;
4698
4699
rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4700
if (rc)
4701
return rc;
4702
4703
rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4704
4705
return rc;
4706
}
4707
4708
static void
4709
bnxt_pci_mapping_free(struct bnxt_softc *softc)
4710
{
4711
if (softc->hwrm_bar.res != NULL)
4712
bus_release_resource(softc->dev, SYS_RES_MEMORY,
4713
softc->hwrm_bar.rid, softc->hwrm_bar.res);
4714
softc->hwrm_bar.res = NULL;
4715
4716
if (softc->doorbell_bar.res != NULL)
4717
bus_release_resource(softc->dev, SYS_RES_MEMORY,
4718
softc->doorbell_bar.rid, softc->doorbell_bar.res);
4719
softc->doorbell_bar.res = NULL;
4720
}
4721
4722
static int
4723
bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4724
{
4725
struct bnxt_link_info *link_info = &softc->link_info;
4726
uint8_t link_up = link_info->link_up;
4727
int rc = 0;
4728
4729
rc = bnxt_hwrm_port_phy_qcfg(softc);
4730
if (rc)
4731
goto exit;
4732
4733
/* TODO: need to add more logic to report VF link */
4734
if (chng_link_state) {
4735
if (link_info->phy_link_status ==
4736
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4737
link_info->link_up = 1;
4738
else
4739
link_info->link_up = 0;
4740
if (link_up != link_info->link_up)
4741
bnxt_report_link(softc);
4742
} else {
4743
/* always link down if not require to update link state */
4744
link_info->link_up = 0;
4745
}
4746
4747
exit:
4748
return rc;
4749
}
4750
4751
#define ETHTOOL_SPEED_1000 1000
4752
#define ETHTOOL_SPEED_10000 10000
4753
#define ETHTOOL_SPEED_20000 20000
4754
#define ETHTOOL_SPEED_25000 25000
4755
#define ETHTOOL_SPEED_40000 40000
4756
#define ETHTOOL_SPEED_50000 50000
4757
#define ETHTOOL_SPEED_100000 100000
4758
#define ETHTOOL_SPEED_200000 200000
4759
#define ETHTOOL_SPEED_UNKNOWN -1
4760
4761
static u32
4762
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
4763
{
4764
switch (fw_link_speed) {
4765
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
4766
return ETHTOOL_SPEED_1000;
4767
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
4768
return ETHTOOL_SPEED_10000;
4769
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
4770
return ETHTOOL_SPEED_20000;
4771
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
4772
return ETHTOOL_SPEED_25000;
4773
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
4774
return ETHTOOL_SPEED_40000;
4775
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
4776
return ETHTOOL_SPEED_50000;
4777
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
4778
return ETHTOOL_SPEED_100000;
4779
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
4780
return ETHTOOL_SPEED_200000;
4781
default:
4782
return ETHTOOL_SPEED_UNKNOWN;
4783
}
4784
}
4785
4786
/*
 * Log and propagate a link state change to iflib.
 *
 * Compares the current link/duplex/phy/flow-control state against the
 * last reported values and returns early when nothing user-visible
 * changed.  On a change, pushes the new state to iflib, prints a
 * summary line, snapshots the current state into the last_* fields,
 * and rebuilds the ifmedia list to match the (possibly new) PHY.
 */
void
bnxt_report_link(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	const char *duplex = NULL, *flow_ctrl = NULL;
	const char *signal_mode = "";

	/* Keep the RDMA auxiliary device's speed cache in sync. */
	if(softc->edev)
		softc->edev->espeed =
		    bnxt_fw_to_ethtool_speed(link_info->link_speed);

	/* Nothing to report when the state is unchanged. */
	if (link_info->link_up == link_info->last_link_up) {
		if (!link_info->link_up)
			return;
		if ((link_info->duplex == link_info->last_duplex) &&
		    (link_info->phy_type == link_info->last_phy_type) &&
		    (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
			return;
	}

	if (link_info->link_up) {
		if (link_info->duplex ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
			duplex = "full duplex";
		else
			duplex = "half duplex";
		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive & transmit";
		else if (link_info->flow_ctrl.tx)
			flow_ctrl = "FC - transmit";
		else if (link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive";
		else
			flow_ctrl = "FC - none";

		/* Decode the active signalling mode, when firmware knows it. */
		if (softc->link_info.phy_qcfg_resp.option_flags &
		    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
			uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
			    HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
			switch (sig_mode) {
			case BNXT_SIG_MODE_NRZ:
				signal_mode = "(NRZ) ";
				break;
			case BNXT_SIG_MODE_PAM4:
				signal_mode = "(PAM4 56Gbps) ";
				break;
			case BNXT_SIG_MODE_PAM4_112:
				signal_mode = "(PAM4 112Gbps) ";
				break;
			default:
				break;
			}
			link_info->sig_mode = sig_mode;
		}

		/*
		 * NOTE(review): the baudrate passed to iflib is hardcoded
		 * to 100G on the up path while the printed speed uses the
		 * real link_speed — presumably intentional, but verify.
		 */
		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
		    IF_Gbps(100));
		device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
		    flow_ctrl, (link_info->link_speed * 100));
	} else {
		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
		    bnxt_get_baudrate(&softc->link_info));
		device_printf(softc->dev, "Link is Down\n");
	}

	/* Snapshot current state for the next change comparison. */
	link_info->last_link_up = link_info->link_up;
	link_info->last_duplex = link_info->duplex;
	link_info->last_phy_type = link_info->phy_type;
	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
	/* update media types */
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
}
static int
4864
bnxt_handle_isr(void *arg)
4865
{
4866
struct bnxt_cp_ring *cpr = arg;
4867
struct bnxt_softc *softc = cpr->ring.softc;
4868
4869
cpr->int_count++;
4870
/* Disable further interrupts for this queue */
4871
if (!BNXT_CHIP_P5_PLUS(softc))
4872
softc->db_ops.bnxt_db_rx_cq(cpr, 0);
4873
4874
return FILTER_SCHEDULE_THREAD;
4875
}
4876
4877
static int
4878
bnxt_handle_def_cp(void *arg)
4879
{
4880
struct bnxt_softc *softc = arg;
4881
4882
softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
4883
iflib_config_task_enqueue(softc->ctx, &softc->def_cp_task);
4884
return FILTER_HANDLED;
4885
}
4886
4887
static void
4888
bnxt_clear_ids(struct bnxt_softc *softc)
4889
{
4890
int i;
4891
4892
softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4893
softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4894
softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4895
softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4896
for (i = 0; i < softc->ntxqsets; i++) {
4897
softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4898
softc->tx_cp_rings[i].ring.phys_id =
4899
(uint16_t)HWRM_NA_SIGNATURE;
4900
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4901
4902
if (!softc->nq_rings)
4903
continue;
4904
softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4905
softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4906
}
4907
for (i = 0; i < softc->nrxqsets; i++) {
4908
softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4909
softc->rx_cp_rings[i].ring.phys_id =
4910
(uint16_t)HWRM_NA_SIGNATURE;
4911
softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4912
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4913
softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
4914
}
4915
softc->vnic_info.filter_id = -1;
4916
softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
4917
softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
4918
memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
4919
softc->vnic_info.rss_grp_tbl.idi_size);
4920
}
4921
4922
static void
4923
bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
4924
{
4925
struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
4926
int i;
4927
4928
for (i = 0; i < cpr->ring.ring_size; i++)
4929
cmp[i].info3_v = !cpr->v_bit;
4930
}
4931
4932
/*
 * Decode and log a firmware ERROR_REPORT async event.  data1 carries
 * the error type plus type-specific bits, data2 carries type-specific
 * detail.  This function only logs; it changes no driver state.
 */
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
	u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (err_type) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		device_printf(softc->dev,
		    "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
		    BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		device_printf(softc->dev, "Pause Storm detected!\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		device_printf(softc->dev,
		    "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
		    BNXT_EVENT_DBR_EPOCH(data1));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
		/* Triage write vs. erase vs. unrecognized in one chain. */
		const char *what =
		    EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1) ? "nvm write error" :
		    EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1) ? "nvm erase error" :
		    "unrecognized nvm error";

		device_printf(softc->dev, "%s reported at address 0x%x\n",
		    what, (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		const char *severity;
		u32 ttype = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);

		if (ttype == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN)
			severity = "warning";
		else if (ttype == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL)
			severity = "critical";
		else if (ttype == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL)
			severity = "fatal";
		else if (ttype == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN)
			severity = "shutdown";
		else {
			device_printf(softc->dev,
			    "Unknown Thermal threshold type event\n");
			return;
		}

		device_printf(softc->dev,
		    "Chip temperature has gone %s the %s thermal threshold!\n",
		    EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) ?
		    "above" : "below", severity);
		device_printf(softc->dev,
		    "Temperature (In Celsius), Current: %u, threshold: %u\n",
		    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
		    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		device_printf(softc->dev,
		    "Speed change is not supported with dual rate transceivers on this board\n");
		break;
	default:
		device_printf(softc->dev,
		    "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
		    err_type, data1, data2);
		break;
	}
}
5013
5014
/*
 * Dispatch a single HWRM async-event completion from the default
 * completion ring.  Most handled events fall through to
 * bnxt_queue_sp_work(); events that need no slow-path work jump to
 * async_event_process_exit, which always forwards the completion to
 * the ULP (RoCE) layer.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/*
		 * P5+ chips defer link handling via a state bit; older
		 * chips refresh media status inline.
		 */
		if (BNXT_CHIP_P5_PLUS(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		/* Intentionally ignored; still queues slow-path work below. */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/* Firmware announced an impending reset. */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		/*
		 * Record reset timing; the timestamps are in deci-seconds
		 * (dsecs) and 0 means "use the driver default".
		 */
		softc->fw_reset_timestamp = jiffies;
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		/* Classify the reset and set the matching state bit. */
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		device_printf(softc->dev,
		    "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
		    type_str, data1, data2,
		    softc->fw_reset_min_dsecs * 100,
		    softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/* Firmware health/recovery watchdog configuration update. */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		/*
		 * Snapshot the heartbeat only on the disabled->enabled
		 * transition; reset count is refreshed every time.
		 */
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
		    "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
		    fw_health->primary ? "primary" : "backup", status,
		    status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* Known event IDs the driver does not act on yet. */
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
			async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	/* Always give the ULP (RoCE) layer a look at the event. */
	bnxt_ulp_async_events(softc, ae);
}
5141
5142
/*
 * Deferred (task-context) processing for the default completion ring,
 * scheduled by bnxt_handle_def_cp().  Consumes valid completions,
 * dispatching async events and logging everything else, then writes
 * back the consumer index and re-rings the doorbell (second arg 1 —
 * presumably re-arms the interrupt; confirm against db_ops).
 */
static void
bnxt_def_cp_task(void *context, int pending)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint32_t type;

	for (;;) {
		/*
		 * Remember the position of the last entry we actually
		 * consumed; when CMP_VALID fails below we rewind to it
		 * so the not-yet-valid slot is re-examined next time.
		 */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		/* Hardware has not produced this entry yet — stop. */
		if (!CMP_VALID(cmpl, v_bit))
			break;

		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(softc, cmpl);
			break;
		/* Completion types that should not land on this ring. */
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_START_V3:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			dev_dbg(softc->dev, "Unhandled Async event completion type %u\n",
				type);
			break;
		default:
			dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
				type);
			break;
		}
	}

	/* Publish the last-consumed position and re-ring the doorbell. */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
5201
5202
uint8_t
5203
get_phy_type(struct bnxt_softc *softc)
5204
{
5205
struct bnxt_link_info *link_info = &softc->link_info;
5206
uint8_t phy_type = link_info->phy_type;
5207
uint16_t supported;
5208
5209
if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
5210
return phy_type;
5211
5212
/* Deduce the phy type from the media type and supported speeds */
5213
supported = link_info->support_speeds;
5214
5215
if (link_info->media_type ==
5216
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
5217
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
5218
if (link_info->media_type ==
5219
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
5220
if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
5221
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
5222
if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
5223
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
5224
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
5225
}
5226
if (link_info->media_type ==
5227
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
5228
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
5229
5230
return phy_type;
5231
}
5232
5233
bool
5234
bnxt_check_hwrm_version(struct bnxt_softc *softc)
5235
{
5236
char buf[16];
5237
5238
sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
5239
softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
5240
if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
5241
device_printf(softc->dev,
5242
"WARNING: HWRM version %s is too old (older than %s)\n",
5243
softc->ver_info->hwrm_if_ver, buf);
5244
return false;
5245
}
5246
else if(softc->ver_info->hwrm_min_major ==
5247
softc->ver_info->hwrm_if_major) {
5248
if (softc->ver_info->hwrm_min_minor >
5249
softc->ver_info->hwrm_if_minor) {
5250
device_printf(softc->dev,
5251
"WARNING: HWRM version %s is too old (older than %s)\n",
5252
softc->ver_info->hwrm_if_ver, buf);
5253
return false;
5254
}
5255
else if (softc->ver_info->hwrm_min_minor ==
5256
softc->ver_info->hwrm_if_minor) {
5257
if (softc->ver_info->hwrm_min_update >
5258
softc->ver_info->hwrm_if_update) {
5259
device_printf(softc->dev,
5260
"WARNING: HWRM version %s is too old (older than %s)\n",
5261
softc->ver_info->hwrm_if_ver, buf);
5262
return false;
5263
}
5264
}
5265
}
5266
return true;
5267
}
5268
5269
static uint64_t
5270
bnxt_get_baudrate(struct bnxt_link_info *link)
5271
{
5272
switch (link->link_speed) {
5273
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
5274
return IF_Mbps(100);
5275
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
5276
return IF_Gbps(1);
5277
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
5278
return IF_Gbps(2);
5279
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
5280
return IF_Mbps(2500);
5281
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
5282
return IF_Gbps(10);
5283
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
5284
return IF_Gbps(20);
5285
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
5286
return IF_Gbps(25);
5287
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
5288
return IF_Gbps(40);
5289
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
5290
return IF_Gbps(50);
5291
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
5292
return IF_Gbps(100);
5293
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
5294
return IF_Mbps(10);
5295
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
5296
return IF_Gbps(200);
5297
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB:
5298
return IF_Gbps(400);
5299
}
5300
return IF_Gbps(100);
5301
}
5302
5303
static void
5304
bnxt_get_wol_settings(struct bnxt_softc *softc)
5305
{
5306
uint16_t wol_handle = 0;
5307
5308
if (!bnxt_wol_supported(softc))
5309
return;
5310
5311
do {
5312
wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
5313
} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
5314
}
5315
5316