Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/bnxt/bnxt_en/if_bnxt.c
106144 views
1
/*-
2
* Broadcom NetXtreme-C/E network driver.
3
*
4
* Copyright (c) 2016 Broadcom, All Rights Reserved.
5
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26
* THE POSSIBILITY OF SUCH DAMAGE.
27
*/
28
29
#include <sys/param.h>
30
#include <sys/socket.h>
31
#include <sys/kernel.h>
32
#include <sys/bus.h>
33
#include <sys/module.h>
34
#include <sys/rman.h>
35
#include <sys/endian.h>
36
#include <sys/sockio.h>
37
#include <sys/priv.h>
38
39
#include <machine/bus.h>
40
#include <machine/resource.h>
41
42
#include <dev/pci/pcireg.h>
43
44
#include <net/if.h>
45
#include <net/if_dl.h>
46
#include <net/if_media.h>
47
#include <net/if_var.h>
48
#include <net/ethernet.h>
49
#include <net/iflib.h>
50
51
#define WANT_NATIVE_PCI_GET_SLOT
52
#include <linux/pci.h>
53
#include <linux/kmod.h>
54
#include <linux/module.h>
55
#include <linux/delay.h>
56
#include <linux/idr.h>
57
#include <linux/netdevice.h>
58
#include <linux/etherdevice.h>
59
#include <linux/rcupdate.h>
60
#include "opt_inet.h"
61
#include "opt_inet6.h"
62
#include "opt_rss.h"
63
64
#include "ifdi_if.h"
65
66
#include "bnxt.h"
67
#include "bnxt_hwrm.h"
68
#include "bnxt_ioctl.h"
69
#include "bnxt_sysctl.h"
70
#include "hsi_struct_def.h"
71
#include "bnxt_mgmt.h"
72
#include "bnxt_ulp.h"
73
#include "bnxt_auxbus_compat.h"
74
75
/*
76
* PCI Device ID Table
77
*/
78
79
/*
 * PCI ID table consumed by iflib's probe routine (see isc_vendor_info in
 * bnxt_sctx_init below) and exported for PNP matching via IFLIB_PNP_INFO.
 * Entries cover NetXtreme-C/E PFs, NPAR partitions, Nitro, and VF IDs;
 * the device-ID macros are defined in bnxt.h.  Must end with PVID_END.
 */
static const pci_vendor_info_t bnxt_vendor_info_array[] =
{
	PVID(BROADCOM_VENDOR_ID, BCM57301,
	    "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57302,
	    "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57304,
	    "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57311,
	    "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57312,
	    "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57314,
	    "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57402,
	    "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
	    "Broadcom BCM57402 NetXtreme-E Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57404,
	    "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
	    "Broadcom BCM57404 NetXtreme-E Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57406,
	    "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
	    "Broadcom BCM57406 NetXtreme-E Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57407,
	    "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
	    "Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
	    "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
	PVID(BROADCOM_VENDOR_ID, BCM57412,
	    "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
	    "Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
	    "Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57414,
	    "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
	    "Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
	    "Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57416,
	    "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
	    "Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
	    "Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
	    "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57417,
	    "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
	    "Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
	    "Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
	    "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57454,
	    "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM58700,
	    "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57508,
	    "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57504,
	    "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
	    "Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
	PVID(BROADCOM_VENDOR_ID, BCM57502,
	    "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57608,
	    "Broadcom BCM57608 NetXtreme-E 25Gb/50Gb/100Gb/200Gb/400Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57604,
	    "Broadcom BCM57604 NetXtreme-E 25Gb/50Gb/100Gb/200Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57602,
	    "Broadcom BCM57602 NetXtreme-E 25Gb/50Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, BCM57601,
	    "Broadcom BCM57601 NetXtreme-E 25Gb/50Gb Ethernet"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
	    "Broadcom NetXtreme-C Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
	    "Broadcom NetXtreme-C Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
	    "Broadcom NetXtreme-C Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
	    "Broadcom NetXtreme-E Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
	    "Broadcom NetXtreme-E Ethernet Virtual Function"),
	PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
	    "Broadcom NetXtreme-E Ethernet Virtual Function"),
	/* required last entry */
	PVID_END
};
175
176
/*
177
* Function prototypes
178
*/
179
180
SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
181
int bnxt_num_pfs = 0;
182
183
void
184
process_nq(struct bnxt_softc *softc, uint16_t nqid);
185
static void *bnxt_register(device_t dev);
186
187
/* Soft queue setup and teardown */
188
static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
189
uint64_t *paddrs, int ntxqs, int ntxqsets);
190
static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
191
uint64_t *paddrs, int nrxqs, int nrxqsets);
192
static void bnxt_queues_free(if_ctx_t ctx);
193
194
/* Device setup and teardown */
195
static int bnxt_attach_pre(if_ctx_t ctx);
196
static int bnxt_attach_post(if_ctx_t ctx);
197
static int bnxt_detach(if_ctx_t ctx);
198
199
/* Device configuration */
200
static void bnxt_init(if_ctx_t ctx);
201
static void bnxt_stop(if_ctx_t ctx);
202
static void bnxt_multi_set(if_ctx_t ctx);
203
static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
204
static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
205
static int bnxt_media_change(if_ctx_t ctx);
206
static int bnxt_promisc_set(if_ctx_t ctx, int flags);
207
static uint64_t bnxt_get_counter(if_ctx_t, ift_counter);
208
static void bnxt_update_admin_status(if_ctx_t ctx);
209
static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
210
211
/* Interrupt enable / disable */
212
static void bnxt_intr_enable(if_ctx_t ctx);
213
static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
214
static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
215
static void bnxt_disable_intr(if_ctx_t ctx);
216
static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
217
218
/* vlan support */
219
static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
220
static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
221
222
/* ioctl */
223
static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
224
225
static int bnxt_shutdown(if_ctx_t ctx);
226
static int bnxt_suspend(if_ctx_t ctx);
227
static int bnxt_resume(if_ctx_t ctx);
228
229
/* Internal support functions */
230
static int bnxt_probe_phy(struct bnxt_softc *softc);
231
static void bnxt_add_media_types(struct bnxt_softc *softc);
232
static int bnxt_pci_mapping(struct bnxt_softc *softc);
233
static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
234
static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
235
static int bnxt_handle_def_cp(void *arg);
236
static int bnxt_handle_isr(void *arg);
237
static void bnxt_clear_ids(struct bnxt_softc *softc);
238
static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
239
static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
240
static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
241
static void bnxt_def_cp_task(void *context, int pending);
242
static void bnxt_handle_async_event(struct bnxt_softc *softc,
243
struct cmpl_base *cmpl);
244
static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
245
static void bnxt_get_wol_settings(struct bnxt_softc *softc);
246
static int bnxt_wol_config(if_ctx_t ctx);
247
static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
248
static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
249
static void bnxt_get_port_module_status(struct bnxt_softc *softc);
250
static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
251
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
252
static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
253
void bnxt_queue_sp_work(struct bnxt_softc *bp);
254
255
void bnxt_fw_reset(struct bnxt_softc *bp);
256
/*
257
* Device Interface Declaration
258
*/
259
260
/*
 * newbus method table: everything except device_register is delegated to
 * the generic iflib entry points; iflib then calls back into this driver
 * through the ifdi_* methods in bnxt_iflib_methods.
 */
static device_method_t bnxt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, bnxt_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
271
272
static driver_t bnxt_driver = {
	"bnxt", bnxt_methods, sizeof(struct bnxt_softc),
};

/* Register on the pci bus; no devclass/order arguments needed. */
DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);

MODULE_LICENSE("Dual BSD/GPL");
/* Hard dependencies: PCI bus, ethernet stack, iflib framework, linuxkpi shims. */
MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
MODULE_VERSION(if_bnxt, 1);

/* Export the PCI ID table so devmatch(8) can autoload this module. */
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
286
287
void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
288
u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
289
290
/*
 * Read a 32-bit register through one of the mapped BARs.
 * bar_idx == 0 selects the doorbell BAR, any other value the HWRM BAR.
 */
u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
{

	if (bar_idx != 0)
		return (bus_space_read_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle,
		    reg_off));
	return (bus_space_read_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle,
	    reg_off));
}
298
299
/*
 * Write a 32-bit register through one of the mapped BARs, converting the
 * value to little-endian (device byte order) first.
 * bar_idx == 0 selects the doorbell BAR, any other value the HWRM BAR.
 */
void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
{
	u32 le_val = htole32(val);

	if (bar_idx != 0)
		bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle,
		    reg_off, le_val);
	else
		bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle,
		    reg_off, le_val);
}
307
308
static DEFINE_IDA(bnxt_aux_dev_ids);
309
310
/*
 * iflib device-dependent (ifdi) method table: these are the callbacks
 * iflib invokes for queue setup, lifecycle, configuration, interrupts,
 * VLANs, ioctls, and power management.
 */
static device_method_t bnxt_iflib_methods[] = {
	DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, bnxt_queues_free),

	DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
	DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
	DEVMETHOD(ifdi_detach, bnxt_detach),

	DEVMETHOD(ifdi_init, bnxt_init),
	DEVMETHOD(ifdi_stop, bnxt_stop),
	DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
	DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
	DEVMETHOD(ifdi_media_status, bnxt_media_status),
	DEVMETHOD(ifdi_media_change, bnxt_media_change),
	DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
	DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
	DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
	DEVMETHOD(ifdi_timer, bnxt_if_timer),

	DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
	DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),

	DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),

	DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),

	DEVMETHOD(ifdi_suspend, bnxt_suspend),
	DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
	DEVMETHOD(ifdi_resume, bnxt_resume),
	DEVMETHOD(ifdi_i2c_req, bnxt_i2c_req),

	DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),

	DEVMETHOD_END
};
350
351
/* iflib-facing driver object referenced by bnxt_sctx_init.isc_driver. */
static driver_t bnxt_iflib_driver = {
	"bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
};
354
355
/*
356
* iflib shared context
357
*/
358
359
#define BNXT_DRIVER_VERSION "230.0.133.0"
360
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
361
extern struct if_txrx bnxt_txrx;
362
/*
 * iflib shared-context template, handed back from bnxt_register().
 * Describes ring counts/sizes and DMA constraints so iflib can allocate
 * the descriptor memory before calling our ifdi queue-alloc methods.
 * Per-set queue layout (see isc_ntxqs/isc_nrxqs = 3):
 *   TX: [0] completion ring, [1] TX ring, [2] NQ (P5+ chips)
 *   RX: [0] completion ring, [1] RX ring, [2] aggregation ring
 */
static struct if_shared_ctx bnxt_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &bnxt_iflib_driver,
	.isc_nfl = 2,				// Number of Free Lists
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),

	// Only use a single segment to avoid page size constraints
	.isc_rx_nsegments = 1,
	.isc_ntxqs = 3,
	.isc_nrxqs = 3,
	.isc_nrxd_min = {16, 16, 16},
	.isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
	    PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
	.isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
	.isc_ntxd_min = {16, 16, 16},
	.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
	    PAGE_SIZE / sizeof(struct tx_bd_short),
	    /* NQ depth 4096 */
	    PAGE_SIZE / sizeof(struct cmpl_base) * 16},
	.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},

	.isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
	.isc_vendor_info = bnxt_vendor_info_array,
	.isc_driver_version = bnxt_driver_version,
};
395
396
#define PCI_SUBSYSTEM_ID 0x2e
397
static struct workqueue_struct *bnxt_pf_wq;
398
399
extern void bnxt_destroy_irq(struct bnxt_softc *softc);
400
401
/*
402
* Device Methods
403
*/
404
405
/*
 * device_register method: hand iflib our shared-context template so it
 * can size and allocate the descriptor rings before attach.
 */
static void *
bnxt_register(device_t dev)
{
	return (&bnxt_sctx_init);
}
410
411
static void
412
bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
413
{
414
415
if (softc->nq_rings)
416
return;
417
418
softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
419
M_DEVBUF, M_NOWAIT | M_ZERO);
420
}
421
422
static void
423
bnxt_nq_free(struct bnxt_softc *softc)
424
{
425
426
if (softc->nq_rings)
427
free(softc->nq_rings, M_DEVBUF);
428
softc->nq_rings = NULL;
429
}
430
431
432
static void
433
bnxt_set_db_mask(struct bnxt_softc *bp, struct bnxt_ring *db,
434
u32 ring_type)
435
{
436
if (BNXT_CHIP_P7(bp)) {
437
db->db_epoch_mask = db->db_ring_mask + 1;
438
db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
439
440
}
441
}
442
443
/*
444
* Device Dependent Configuration Functions
445
*/
446
447
/* Soft queue setup and teardown */
448
static int
449
bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
450
uint64_t *paddrs, int ntxqs, int ntxqsets)
451
{
452
struct bnxt_softc *softc;
453
int i;
454
int rc;
455
456
softc = iflib_get_softc(ctx);
457
458
if (BNXT_CHIP_P5_PLUS(softc)) {
459
bnxt_nq_alloc(softc, ntxqsets);
460
if (!softc->nq_rings) {
461
device_printf(iflib_get_dev(ctx),
462
"unable to allocate NQ rings\n");
463
rc = ENOMEM;
464
goto nq_alloc_fail;
465
}
466
}
467
468
softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
469
M_DEVBUF, M_NOWAIT | M_ZERO);
470
if (!softc->tx_cp_rings) {
471
device_printf(iflib_get_dev(ctx),
472
"unable to allocate TX completion rings\n");
473
rc = ENOMEM;
474
goto cp_alloc_fail;
475
}
476
softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
477
M_DEVBUF, M_NOWAIT | M_ZERO);
478
if (!softc->tx_rings) {
479
device_printf(iflib_get_dev(ctx),
480
"unable to allocate TX rings\n");
481
rc = ENOMEM;
482
goto ring_alloc_fail;
483
}
484
485
for (i=0; i < ntxqsets; i++) {
486
rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
487
&softc->tx_stats[i], 0);
488
if (rc)
489
goto dma_alloc_fail;
490
bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
491
BUS_DMASYNC_PREREAD);
492
}
493
494
for (i = 0; i < ntxqsets; i++) {
495
/* Set up the completion ring */
496
softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
497
softc->tx_cp_rings[i].ring.phys_id =
498
(uint16_t)HWRM_NA_SIGNATURE;
499
softc->tx_cp_rings[i].ring.softc = softc;
500
softc->tx_cp_rings[i].ring.idx = i;
501
softc->tx_cp_rings[i].ring.id =
502
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
503
softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
504
softc->legacy_db_size: softc->tx_cp_rings[i].ring.id * 0x80;
505
softc->tx_cp_rings[i].ring.ring_size =
506
softc->scctx->isc_ntxd[0];
507
softc->tx_cp_rings[i].ring.db_ring_mask =
508
softc->tx_cp_rings[i].ring.ring_size - 1;
509
softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
510
softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
511
512
513
/* Set up the TX ring */
514
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
515
softc->tx_rings[i].softc = softc;
516
softc->tx_rings[i].idx = i;
517
softc->tx_rings[i].id =
518
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
519
softc->tx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
520
softc->legacy_db_size : softc->tx_rings[i].id * 0x80;
521
softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
522
softc->tx_rings[i].db_ring_mask = softc->tx_rings[i].ring_size - 1;
523
softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
524
softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
525
526
bnxt_create_tx_sysctls(softc, i);
527
528
if (BNXT_CHIP_P5_PLUS(softc)) {
529
/* Set up the Notification ring (NQ) */
530
softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
531
softc->nq_rings[i].ring.phys_id =
532
(uint16_t)HWRM_NA_SIGNATURE;
533
softc->nq_rings[i].ring.softc = softc;
534
softc->nq_rings[i].ring.idx = i;
535
softc->nq_rings[i].ring.id = i;
536
softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
537
softc->legacy_db_size : softc->nq_rings[i].ring.id * 0x80;
538
softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
539
softc->nq_rings[i].ring.db_ring_mask = softc->nq_rings[i].ring.ring_size - 1;
540
softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
541
softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
542
softc->nq_rings[i].type = Q_TYPE_TX;
543
}
544
}
545
546
softc->ntxqsets = ntxqsets;
547
return rc;
548
549
dma_alloc_fail:
550
for (i = i - 1; i >= 0; i--)
551
iflib_dma_free(&softc->tx_stats[i]);
552
free(softc->tx_rings, M_DEVBUF);
553
ring_alloc_fail:
554
free(softc->tx_cp_rings, M_DEVBUF);
555
cp_alloc_fail:
556
bnxt_nq_free(softc);
557
nq_alloc_fail:
558
return rc;
559
}
560
561
static void
562
bnxt_queues_free(if_ctx_t ctx)
563
{
564
struct bnxt_softc *softc = iflib_get_softc(ctx);
565
int i;
566
567
// Free TX queues
568
for (i=0; i<softc->ntxqsets; i++)
569
iflib_dma_free(&softc->tx_stats[i]);
570
free(softc->tx_rings, M_DEVBUF);
571
softc->tx_rings = NULL;
572
free(softc->tx_cp_rings, M_DEVBUF);
573
softc->tx_cp_rings = NULL;
574
softc->ntxqsets = 0;
575
576
// Free RX queues
577
for (i=0; i<softc->nrxqsets; i++)
578
iflib_dma_free(&softc->rx_stats[i]);
579
iflib_dma_free(&softc->hw_tx_port_stats);
580
iflib_dma_free(&softc->hw_rx_port_stats);
581
iflib_dma_free(&softc->hw_tx_port_stats_ext);
582
iflib_dma_free(&softc->hw_rx_port_stats_ext);
583
free(softc->grp_info, M_DEVBUF);
584
free(softc->ag_rings, M_DEVBUF);
585
free(softc->rx_rings, M_DEVBUF);
586
free(softc->rx_cp_rings, M_DEVBUF);
587
bnxt_nq_free(softc);
588
}
589
590
static int
591
bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
592
uint64_t *paddrs, int nrxqs, int nrxqsets)
593
{
594
struct bnxt_softc *softc;
595
int i;
596
int rc;
597
598
softc = iflib_get_softc(ctx);
599
600
softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
601
M_DEVBUF, M_NOWAIT | M_ZERO);
602
if (!softc->rx_cp_rings) {
603
device_printf(iflib_get_dev(ctx),
604
"unable to allocate RX completion rings\n");
605
rc = ENOMEM;
606
goto cp_alloc_fail;
607
}
608
softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
609
M_DEVBUF, M_NOWAIT | M_ZERO);
610
if (!softc->rx_rings) {
611
device_printf(iflib_get_dev(ctx),
612
"unable to allocate RX rings\n");
613
rc = ENOMEM;
614
goto ring_alloc_fail;
615
}
616
softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
617
M_DEVBUF, M_NOWAIT | M_ZERO);
618
if (!softc->ag_rings) {
619
device_printf(iflib_get_dev(ctx),
620
"unable to allocate aggregation rings\n");
621
rc = ENOMEM;
622
goto ag_alloc_fail;
623
}
624
softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
625
M_DEVBUF, M_NOWAIT | M_ZERO);
626
if (!softc->grp_info) {
627
device_printf(iflib_get_dev(ctx),
628
"unable to allocate ring groups\n");
629
rc = ENOMEM;
630
goto grp_alloc_fail;
631
}
632
633
for (i=0; i < nrxqsets; i++) {
634
rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
635
&softc->rx_stats[i], 0);
636
if (rc)
637
goto hw_stats_alloc_fail;
638
bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
639
BUS_DMASYNC_PREREAD);
640
}
641
642
/*
643
* Additional 512 bytes for future expansion.
644
* To prevent corruption when loaded with newer firmwares with added counters.
645
* This can be deleted when there will be no further additions of counters.
646
*/
647
#define BNXT_PORT_STAT_PADDING 512
648
649
rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
650
&softc->hw_rx_port_stats, 0);
651
if (rc)
652
goto hw_port_rx_stats_alloc_fail;
653
654
bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
655
softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
656
657
658
rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
659
&softc->hw_tx_port_stats, 0);
660
if (rc)
661
goto hw_port_tx_stats_alloc_fail;
662
663
bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
664
softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
665
666
softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
667
softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
668
669
670
rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
671
&softc->hw_rx_port_stats_ext, 0);
672
if (rc)
673
goto hw_port_rx_stats_ext_alloc_fail;
674
675
bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
676
softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
677
678
rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
679
&softc->hw_tx_port_stats_ext, 0);
680
if (rc)
681
goto hw_port_tx_stats_ext_alloc_fail;
682
683
bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
684
softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
685
686
softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
687
softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
688
689
for (i = 0; i < nrxqsets; i++) {
690
/* Allocation the completion ring */
691
softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
692
softc->rx_cp_rings[i].ring.phys_id =
693
(uint16_t)HWRM_NA_SIGNATURE;
694
softc->rx_cp_rings[i].ring.softc = softc;
695
softc->rx_cp_rings[i].ring.idx = i;
696
softc->rx_cp_rings[i].ring.id = i + 1;
697
softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
698
softc->legacy_db_size : softc->rx_cp_rings[i].ring.id * 0x80;
699
/*
700
* If this ring overflows, RX stops working.
701
*/
702
softc->rx_cp_rings[i].ring.ring_size =
703
softc->scctx->isc_nrxd[0];
704
softc->rx_cp_rings[i].ring.db_ring_mask =
705
softc->rx_cp_rings[i].ring.ring_size - 1;
706
707
softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
708
softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
709
710
/* Allocate the RX ring */
711
softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
712
softc->rx_rings[i].softc = softc;
713
softc->rx_rings[i].idx = i;
714
softc->rx_rings[i].id = i + 1;
715
softc->rx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
716
softc->legacy_db_size : softc->rx_rings[i].id * 0x80;
717
softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
718
softc->rx_rings[i].db_ring_mask =
719
softc->rx_rings[i].ring_size -1;
720
softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
721
softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
722
723
/* Allocate the TPA start buffer */
724
softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
725
(RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
726
M_DEVBUF, M_NOWAIT | M_ZERO);
727
if (softc->rx_rings[i].tpa_start == NULL) {
728
rc = -ENOMEM;
729
device_printf(softc->dev,
730
"Unable to allocate space for TPA\n");
731
goto tpa_alloc_fail;
732
}
733
/* Allocate the AG ring */
734
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
735
softc->ag_rings[i].softc = softc;
736
softc->ag_rings[i].idx = i;
737
softc->ag_rings[i].id = nrxqsets + i + 1;
738
softc->ag_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
739
softc->legacy_db_size : softc->ag_rings[i].id * 0x80;
740
softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
741
softc->ag_rings[i].db_ring_mask = softc->ag_rings[i].ring_size - 1;
742
softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
743
softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
744
745
/* Allocate the ring group */
746
softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
747
softc->grp_info[i].stats_ctx =
748
softc->rx_cp_rings[i].stats_ctx_id;
749
softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
750
softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
751
softc->grp_info[i].cp_ring_id =
752
softc->rx_cp_rings[i].ring.phys_id;
753
754
bnxt_create_rx_sysctls(softc, i);
755
}
756
757
/*
758
* When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
759
* HWRM every sec with which firmware timeouts can happen
760
*/
761
if (BNXT_PF(softc))
762
bnxt_create_port_stats_sysctls(softc);
763
764
/* And finally, the VNIC */
765
softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
766
softc->vnic_info.filter_id = -1;
767
softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
768
softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
769
softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
770
softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
771
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
772
softc->vnic_info.mc_list_count = 0;
773
softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
774
rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
775
&softc->vnic_info.mc_list, 0);
776
if (rc)
777
goto mc_list_alloc_fail;
778
779
/* The VNIC RSS Hash Key */
780
rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
781
&softc->vnic_info.rss_hash_key_tbl, 0);
782
if (rc)
783
goto rss_hash_alloc_fail;
784
bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
785
softc->vnic_info.rss_hash_key_tbl.idi_map,
786
BUS_DMASYNC_PREWRITE);
787
memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
788
softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
789
790
/* Allocate the RSS tables */
791
rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
792
&softc->vnic_info.rss_grp_tbl, 0);
793
if (rc)
794
goto rss_grp_alloc_fail;
795
bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
796
softc->vnic_info.rss_grp_tbl.idi_map,
797
BUS_DMASYNC_PREWRITE);
798
memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
799
softc->vnic_info.rss_grp_tbl.idi_size);
800
801
softc->nrxqsets = nrxqsets;
802
return rc;
803
804
rss_grp_alloc_fail:
805
iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
806
rss_hash_alloc_fail:
807
iflib_dma_free(&softc->vnic_info.mc_list);
808
mc_list_alloc_fail:
809
for (i = i - 1; i >= 0; i--) {
810
if (softc->rx_rings[i].tpa_start)
811
free(softc->rx_rings[i].tpa_start, M_DEVBUF);
812
}
813
tpa_alloc_fail:
814
iflib_dma_free(&softc->hw_tx_port_stats_ext);
815
hw_port_tx_stats_ext_alloc_fail:
816
iflib_dma_free(&softc->hw_rx_port_stats_ext);
817
hw_port_rx_stats_ext_alloc_fail:
818
iflib_dma_free(&softc->hw_tx_port_stats);
819
hw_port_tx_stats_alloc_fail:
820
iflib_dma_free(&softc->hw_rx_port_stats);
821
hw_port_rx_stats_alloc_fail:
822
for (i=0; i < nrxqsets; i++) {
823
if (softc->rx_stats[i].idi_vaddr)
824
iflib_dma_free(&softc->rx_stats[i]);
825
}
826
hw_stats_alloc_fail:
827
free(softc->grp_info, M_DEVBUF);
828
grp_alloc_fail:
829
free(softc->ag_rings, M_DEVBUF);
830
ag_alloc_fail:
831
free(softc->rx_rings, M_DEVBUF);
832
ring_alloc_fail:
833
free(softc->rx_cp_rings, M_DEVBUF);
834
cp_alloc_fail:
835
return rc;
836
}
837
838
static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
839
{
840
if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
841
iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
842
softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
843
}
844
845
static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
846
{
847
int rc;
848
849
rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
850
&softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
851
852
return rc;
853
}
854
855
static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
856
{
857
int i;
858
859
for (i = 0; i < rmem->nr_pages; i++) {
860
if (!rmem->pg_arr[i].idi_vaddr)
861
continue;
862
863
iflib_dma_free(&rmem->pg_arr[i]);
864
rmem->pg_arr[i].idi_vaddr = NULL;
865
}
866
if (rmem->pg_tbl.idi_vaddr) {
867
iflib_dma_free(&rmem->pg_tbl);
868
rmem->pg_tbl.idi_vaddr = NULL;
869
870
}
871
if (rmem->vmem_size && *rmem->vmem) {
872
free(*rmem->vmem, M_DEVBUF);
873
*rmem->vmem = NULL;
874
}
875
}
876
877
static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
878
{
879
u8 init_val = ctxm->init_value;
880
u16 offset = ctxm->init_offset;
881
u8 *p2 = p;
882
int i;
883
884
if (!init_val)
885
return;
886
if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
887
memset(p, init_val, len);
888
return;
889
}
890
for (i = 0; i < len; i += ctxm->entry_size)
891
*(p2 + i + offset) = init_val;
892
}
893
894
/*
 * Allocate the DMA pages backing a ring or context-memory block and, for
 * multi-page or indirect (depth > 0) layouts, the 64-bit PTE page table
 * that points at them.  On failure, pages already allocated are NOT freed
 * here; the caller is expected to unwind with bnxt_free_ring().
 */
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
	uint64_t valid_bit = 0;
	int i;
	int rc;

	/* PTEs carry the VALID bit only for layouts that use PTE flags. */
	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;

	/* Multi-page or indirect layouts need a page-directory table first. */
	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
		/* 8 bytes per 64-bit PTE. */
		size_t pg_tbl_size = rmem->nr_pages * 8;

		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
			pg_tbl_size = rmem->page_size;

		rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
		if (rc)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		uint64_t extra_bits = valid_bit;
		uint64_t *ptr;

		rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
		if (rc)
			return -ENOMEM;

		/* FW context pages must be pre-filled with an init pattern. */
		if (rmem->ctx_mem)
			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
			    rmem->page_size);

		if (rmem->nr_pages > 1 || rmem->depth > 0) {
			/*
			 * Ring PTEs tag the final two entries so the hardware
			 * can detect the wrap point.
			 */
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;

			/* Publish the page's bus address in the page table. */
			ptr = (void *) rmem->pg_tbl.idi_vaddr;
			ptr[i] = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
		}
	}

	/* Optional host-side shadow area associated with the ring. */
	if (rmem->vmem_size) {
		*rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}
946
947
948
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
949
(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
950
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
951
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
952
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
953
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
954
955
static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
956
struct bnxt_ctx_pg_info *ctx_pg)
957
{
958
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
959
960
rmem->page_size = BNXT_PAGE_SIZE;
961
rmem->pg_arr = ctx_pg->ctx_arr;
962
rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
963
if (rmem->depth >= 1)
964
rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
965
966
return bnxt_alloc_ring(softc, rmem);
967
}
968
969
/*
 * Allocate 'mem_size' bytes of firmware context memory for ctx_pg, using a
 * flat page list when it fits in MAX_CTX_PAGES, or a two-level page table
 * otherwise.  'ctxm' (may be NULL) supplies the init pattern for the pages.
 * On failure, partially-built state is left in place for the caller to
 * release via bnxt_free_ctx_pg_tbls().
 */
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		/* Two-level layout: a directory of sub-tables. */
		rmem->depth = 2;
		/*
		 * Array of MAX_CTX_PAGES pointers; sizeof(ctx_pg) is
		 * pointer-sized here, matching the element type.
		 */
		ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		/* Allocate the top-level directory pages. */
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			/*
			 * Each directory page doubles as the sub-table's
			 * page table (pg_tbl aliases ctx_arr[i]).
			 */
			memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			rmem->ctx_mem = ctxm;
			if (i == (nr_tbls - 1)) {
				/* Last sub-table may be partially filled. */
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
			if (rc)
				break;
		}
	} else {
		/* Single-level layout. */
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		rmem->ctx_mem = ctxm;
		rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
	}
	return rc;
}
1028
1029
/*
 * Free context memory previously built by bnxt_alloc_ctx_pg_tbls(),
 * including the sub-tables of a two-level layout, then the top level.
 * Safe against partially-constructed state (NULL sub-table slots are
 * skipped).
 */
static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(softc, rmem2);

			/*
			 * ctx_arr[i] aliases the sub-table's pg_tbl, which
			 * bnxt_free_ring() just released — clear the alias
			 * so the top-level free below doesn't touch it again.
			 */
			ctx_pg->ctx_arr[i].idi_vaddr = NULL;

			free(pg_tbl , M_DEVBUF);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(softc, rmem);
	ctx_pg->nr_pages = 0;
}
1057
1058
/*
 * Size and allocate the backing store for one context-memory type.
 * 'entries' is rounded to the type's multiple and clamped to its
 * min/max; one page-table set is built per instance bit in
 * instance_bmap (n == 1 when no bitmap is reported).
 * Marks the type mem_valid on full success.
 */
static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
				   u8 pg_lvl)
{
	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
	int i, rc = 0, n = 1;
	u32 mem_size;

	if (!ctxm->entry_size || !ctx_pg)
		return -EINVAL;
	if (ctxm->instance_bmap)
		n = hweight32(ctxm->instance_bmap);
	if (ctxm->entry_multiple)
		entries = roundup(entries, ctxm->entry_multiple);
	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
	mem_size = entries * ctxm->entry_size;
	for (i = 0; i < n && !rc; i++) {
		ctx_pg[i].entries = entries;
		/* Only pass the init template when an init value exists. */
		rc = bnxt_alloc_ctx_pg_tbls(softc, &ctx_pg[i], mem_size, pg_lvl,
					    ctxm->init_value ? ctxm : NULL);
	}
	if (!rc)
		ctxm->mem_valid = 1;
	return rc;
}
1083
1084
static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
1085
{
1086
struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
1087
u16 type;
1088
1089
if (!ctx)
1090
return;
1091
1092
for (type = 0; type < BNXT_CTX_MAX; type++) {
1093
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
1094
struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
1095
int i, n = 1;
1096
1097
if (!ctx_pg)
1098
continue;
1099
if (ctxm->instance_bmap)
1100
n = hweight32(ctxm->instance_bmap);
1101
for (i = 0; i < n; i++)
1102
bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
1103
1104
kfree(ctx_pg);
1105
ctxm->pg_info = NULL;
1106
}
1107
1108
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
1109
kfree(ctx);
1110
softc->ctx_mem = NULL;
1111
}
1112
1113
/*
 * Configure backing-store context memory with the V2 firmware interface.
 * On a PF, first sets up the trace-buffer context types; then marks the
 * last valid type (firmware needs to know which config message is final)
 * and pushes every valid type to the firmware one by one.
 * Returns 0 on success or the first fatal HWRM error.
 */
static int
bnxt_backing_store_cfg_v2(struct bnxt_softc *softc, u32 ena)
{
	struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
	struct bnxt_ctx_mem_type *ctxm;
	u16 last_type = BNXT_CTX_INV;
	int rc = 0;
	u16 type;

	if (BNXT_PF(softc)) {
		for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) {
			ctxm = &ctx->ctx_arr[type];
			if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
				continue;
			rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
			if (rc) {
				/* Trace buffers are best-effort; keep going. */
				device_printf(softc->dev, "Unable to setup ctx page for type:0x%x.\n", type);
				rc = 0;
				continue;
			}
			/* ckp TODO: this is trace buffer related stuff, so keeping it disabled now. needs revisit */
			//bnxt_bs_trace_init(bp, ctxm, type - BNXT_CTX_SRT_TRACE);
			last_type = type;
		}
	}

	if (last_type == BNXT_CTX_INV) {
		/* No trace type configured: pick the conventional last type. */
		if (!ena)
			return 0;
		else if (ena & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM)
			last_type = BNXT_CTX_MAX - 1;
		else
			last_type = BNXT_CTX_L2_MAX - 1;
	}
	ctx->ctx_arr[last_type].last = 1;

	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
		ctxm = &ctx->ctx_arr[type];

		if (!ctxm->mem_valid)
			continue;
		rc = bnxt_hwrm_func_backing_store_cfg_v2(softc, ctxm, ctxm->last);
		if (rc)
			return rc;
	}
	return 0;
}
1160
1161
/*
 * Query the firmware's backing-store requirements and allocate/configure
 * all context memory for P5+ chips.  VFs skip the legacy per-type sizing
 * (the PF provisions for them).  Idempotent: returns immediately once
 * BNXT_CTX_FLAG_INITED is set.
 */
static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_type *ctxm;
	struct bnxt_ctx_mem_info *ctx;
	u32 l2_qps, qp1_qps, max_qps;
	u32 ena, entries_sp, entries;
	u32 srqs, max_srqs, min;
	u32 num_mr, num_ah;
	u32 extra_srqs = 0;
	u32 extra_qps = 0;
	u8 pg_lvl = 1;
	int i, rc;

	/* Older chips use ring-group based context; nothing to do here. */
	if (!BNXT_CHIP_P5_PLUS(softc))
		return 0;

	rc = bnxt_hwrm_func_backing_store_qcaps(softc);
	if (rc) {
		device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
		    rc);
		return rc;
	}
	ctx = softc->ctx_mem;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	ena = 0;
	if (BNXT_VF(softc))
		goto skip_legacy;

	/* L2 QP/SRQ baselines from the firmware-reported capabilities. */
	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
	l2_qps = ctxm->qp_l2_entries;
	qp1_qps = ctxm->qp_qp1_entries;
	max_qps = ctxm->max_entries;
	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
	srqs = ctxm->srq_l2_entries;
	max_srqs = ctxm->max_entries;
	if (softc->flags & BNXT_FLAG_ROCE_CAP) {
		/* RoCE needs deeper page tables and extra QP/SRQ headroom. */
		pg_lvl = 2;
		if (BNXT_SW_RES_LMT(softc)) {
			extra_qps = max_qps - l2_qps - qp1_qps;
			extra_srqs = max_srqs - srqs;
		} else {
			extra_qps = min_t(uint32_t, 65536, max_qps - l2_qps - qp1_qps);
			extra_srqs = min_t(uint32_t, 8192, max_srqs - srqs);
		}
	}

	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
	    pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
	if (rc)
		return rc;

	/* CQs: two per extra QP on top of the L2 baseline. */
	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
	    extra_qps * 2, pg_lvl);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
	if (rc)
		return rc;

	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
	ctx_pg = ctxm->pg_info;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
	num_ah = min_t(u32, num_mr, 1024 * 128);
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
	if (rc)
		return rc;
	ctx_pg->entries = num_mr + num_ah;
	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
	if (ctxm->mrav_num_entries_units)
		/* FW wants MR count in the high 16 bits, AH in the low. */
		ctx_pg->entries =
		    ((num_mr / ctxm->mrav_num_entries_units) << 16) |
		    (num_ah / ctxm->mrav_num_entries_units);

	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
	if (rc)
		return rc;
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;

skip_rdma:
	/* Slow-path and fast-path TQM queue-manager contexts. */
	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
	min = ctxm->min_entries;
	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
	    2 * (extra_qps + qp1_qps) + min;
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
	if (rc)
		return rc;

	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
	entries = l2_qps + 2 * (extra_qps + qp1_qps);
	rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
	if (rc)
		return rc;
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
		/* Rings beyond the legacy limit share the RING8 enable bit. */
		if (i < BNXT_MAX_TQM_LEGACY_RINGS)
			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
		else
			ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
	}
	ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;

skip_legacy:
	if (BNXT_CHIP_P7(softc)) {
		if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
			rc = bnxt_backing_store_cfg_v2(softc, ena);
	} else {
		rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
	}
	if (rc) {
		device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
		    rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;

	return 0;
}
1301
1302
/*
1303
* If we update the index, a write barrier is needed after the write to ensure
1304
* the completion ring has space before the RX/TX ring does. Since we can't
1305
* make the RX and AG doorbells covered by the same barrier without remapping
1306
* MSI-X vectors, we create the barrier over the entire doorbell bar.
1307
* TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1308
* for a single ring group.
1309
*
1310
* A barrier of just the size of the write is used to ensure the ordering
1311
* remains correct and no writes are lost.
1312
*/
1313
1314
/* Ring the legacy (Cu+Wh) RX producer doorbell with the new index. */
static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint32_t val = htole32(RX_DOORBELL_KEY_RX | idx);

	/* Order this doorbell after all prior descriptor writes. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell, val);
}
1324
1325
/* Ring the legacy (Cu+Wh) TX producer doorbell with the new index. */
static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint32_t val = htole32(TX_DOORBELL_KEY_TX | idx);

	/* Order this doorbell after all prior descriptor writes. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell, val);
}
1335
1336
/*
 * Ring the legacy (Cu+Wh) completion-queue doorbell: acknowledge the
 * current consumer index (or none, if cons is the UINT32_MAX "no work
 * seen yet" sentinel) and arm or mask the interrupt.  The trailing
 * whole-BAR barrier orders this against the RX/AG doorbells (see the
 * comment block above these handlers).
 */
static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;

	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole32(CMPL_DOORBELL_KEY_CMPL |
	    ((cpr->cons == UINT32_MAX) ? 0 :
	    (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
	    ((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1351
1352
/* Ring a Thor (P5) RX producer doorbell: 64-bit SRQ-type message. */
static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t val;

	val = (DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT);

	/* Order the doorbell after all prior descriptor writes. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(val));
}
1363
1364
/* Ring a Thor (P5) TX producer doorbell: 64-bit SQ-type message. */
static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t val;

	val = (DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT);

	/* Order the doorbell after all prior descriptor writes. */
	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(val));
}
1375
1376
/*
 * Ring a Thor (P5) RX completion-queue doorbell.  The UINT32_MAX sentinel
 * (no completion consumed yet) maps to index 0; otherwise the next entry
 * past the current consumer is acknowledged.  enable_irq selects ARMALL
 * vs. a plain (masked) CQ update.
 */
static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	if (cons == UINT32_MAX)
		cons = 0;
	else
		cons = RING_NEXT(&cpr->ring, cons);

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order against prior work, then fence the whole doorbell BAR. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1401
1402
/*
 * Ring a Thor (P5) TX completion-queue doorbell.  Unlike the RX variant,
 * the raw consumer index is used directly (no sentinel/advance step).
 * enable_irq selects ARMALL vs. a plain (masked) CQ update.
 */
static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);

	/* Order against prior work, then fence the whole doorbell BAR. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1422
1423
/*
 * Ring a Thor (P5) notification-queue doorbell: acknowledge the NQ
 * consumer index and either re-arm (NQ_ARM) or leave the NQ un-armed.
 */
static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	dbc_dbc_t db_msg = { 0 };
	uint32_t cons = cpr->cons;

	db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);

	db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
	    DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
	    ((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);

	/* Order against prior work, then fence the whole doorbell BAR. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1443
1444
/*
 * Ring a Thor2 (P7) RX producer doorbell.  P7 doorbells carry a VALID bit
 * and an epoch bit (tracked per index in ring->epoch_arr) so the hardware
 * can detect ring wrap; DB_RING_IDX folds both into the message.
 * Out-of-range indices are logged and dropped rather than written.
 */
static void
bnxt_thor2_db_rx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SRQ | DBR_VALID | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(db_val));
}
1467
1468
/*
 * Ring a Thor2 (P7) TX producer doorbell.  Same VALID/epoch encoding as
 * the RX variant but with the SQ doorbell type.  Out-of-range indices are
 * logged and dropped rather than written.
 */
static void
bnxt_thor2_db_tx(void *db_ptr, uint16_t idx)
{
	struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
	uint64_t db_val;

	if (idx >= ring->ring_size) {
		device_printf(ring->softc->dev, "%s: BRCM DBG: idx: %d crossed boundary\n", __func__, idx);
		return;
	}

	db_val = ((DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID | idx) |
	    ((uint64_t)ring->phys_id << DBR_XID_SFT));

	/* Add the PI index */
	db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);

	bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
	    htole64(db_val));
}
1491
1492
/*
 * Ring a Thor2 (P7) RX completion-queue doorbell.  Uses the raw consumer
 * index (UINT32_MAX sentinel maps to 0) and, when re-arming, includes the
 * CQ toggle bit the P7 firmware requires for ARM doorbells.
 */
static void
bnxt_thor2_db_rx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	if (cons == UINT32_MAX)
		cons = 0;

	/* The toggle bit is only meaningful when arming. */
	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	/* Order against prior work, then fence the whole doorbell BAR. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1522
1523
/*
 * Ring a Thor2 (P7) TX completion-queue doorbell.  Same encoding as the
 * RX variant; note this path does not special-case the UINT32_MAX
 * sentinel for raw_cons.
 */
static void
bnxt_thor2_db_tx_cq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	/* The toggle bit is only meaningful when arming. */
	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_CQ_ARMALL;
	else
		db_msg |= DBR_TYPE_CQ;

	/* Order against prior work, then fence the whole doorbell BAR. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1550
1551
/*
 * Ring a Thor2 (P7) notification-queue doorbell: acknowledge raw_cons and
 * either re-arm (NQ_ARM, with the toggle bit) or mask (NQ_MASK) the NQ.
 */
static void
bnxt_thor2_db_nq(void *db_ptr, bool enable_irq)
{
	struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
	struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
	u64 db_msg = { 0 };
	uint32_t cons = cpr->raw_cons;
	uint32_t toggle = 0;

	/* The toggle bit is only meaningful when arming. */
	if (enable_irq == true)
		toggle = cpr->toggle;

	db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
	    DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);

	if (enable_irq)
		db_msg |= DBR_TYPE_NQ_ARM;
	else
		db_msg |= DBR_TYPE_NQ_MASK;

	/* Order against prior work, then fence the whole doorbell BAR. */
	bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
	    htole64(*(uint64_t *)&db_msg));
	bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
	    BUS_SPACE_BARRIER_WRITE);
}
1578
1579
/*
 * Look up a bnxt PF softc on the global pf_list, either by interface name
 * (when dev_name is non-NULL) or by PCI domain/bus/dev-function triple.
 * Returns the matching softc, or NULL when nothing matches.
 */
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
	struct bnxt_softc_list *entry = NULL;

	SLIST_FOREACH(entry, &pf_list, next) {
		struct bnxt_softc *sc = entry->softc;

		if (dev_name != NULL) {
			/* Name-based lookup. */
			if (strncmp(dev_name, if_name(iflib_get_ifp(sc->ctx)),
			    BNXT_MAX_STR) == 0)
				return sc;
			continue;
		}

		/* Location-based lookup: domain/bus/device-function. */
		if (domain == sc->domain && bus == sc->bus &&
		    dev_fn == sc->dev_fn)
			return sc;
	}

	return NULL;
}
1600
1601
1602
static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
1603
{
1604
uint8_t i, lltc = 0;
1605
1606
if (!softc->max_lltc)
1607
return;
1608
1609
/* Verify that lossless TX and RX queues are in the same index */
1610
for (i = 0; i < softc->max_tc; i++) {
1611
if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
1612
BNXT_LLQ(softc->rx_q_info[i].queue_profile))
1613
lltc++;
1614
}
1615
softc->max_lltc = min(softc->max_lltc, lltc);
1616
}
1617
1618
/*
 * Poll the firmware with HWRM_VER_GET to see whether it is responsive.
 * Returns 0 when the firmware answers and is ready, -EAGAIN when it
 * answers but reports DEV_NOT_RDY, or the send error otherwise.
 * Used while waiting out a firmware reset.
 */
static int bnxt_hwrm_poll(struct bnxt_softc *bp)
{
	struct hwrm_ver_get_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = _hwrm_send_message(bp, &req, sizeof(req));
	if (rc)
		return rc;

	if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
		rc = -EAGAIN;

	return rc;
}
1640
1641
/*
 * Acquire rtnl from slow-path task context without deadlocking against
 * bnxt_close().
 */
static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
{
	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
	 * set. If the device is being closed, bnxt_close() may be holding
	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
}
1651
1652
/*
 * Counterpart of bnxt_rtnl_lock_sp(): re-mark the slow-path task as
 * running before dropping rtnl.
 */
static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
{
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}
1657
1658
/*
 * Quiesce the device after a fatal firmware error: mask interrupts and
 * disable the PCI device to stop any further DMA.
 */
static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
{
	bnxt_disable_intr(softc->ctx);
	if (pci_is_enabled(softc->pdev))
		pci_disable_device(softc->pdev);
}
1664
1665
/*
 * Read one firmware-health register.  The register descriptor encodes
 * both its address space (config space, GRC window, BAR0, BAR1) and its
 * offset; GRC registers are read through their pre-mapped BAR0 window
 * (see bnxt_map_fw_health_regs()).  The RESET_INPROG register is masked
 * with the firmware-advertised mask before being returned.
 */
static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* GRC reads go through the mapped window in BAR0. */
		reg_off = fw_health->mapped_regs[reg_idx];
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl_fbsd(bp, reg_off, 0);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl_fbsd(bp, reg_off, 2);
		break;
	}
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;
	return val;
}
1691
1692
/*
 * Bring the device down in preparation for a firmware reset: stop ULPs,
 * quiesce on fatal error, stop the interface, unregister the driver with
 * firmware, release per-queue interrupts, disable the PCI device/bus
 * mastering, and free the firmware context memory.  The order matters —
 * DMA must be stopped before memory is released back to the kernel.
 */
static void bnxt_fw_reset_close(struct bnxt_softc *bp)
{
	int i;
	bnxt_ulp_stop(bp);
	/* When firmware is in fatal state, quiesce device and disable
	 * bus master to prevent any potential bad DMAs before freeing
	 * kernel memory.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
		u16 val = 0;

		/* All-ones config read means the device has dropped off. */
		val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
		if (val == 0xffff) {
			bp->fw_reset_min_dsecs = 0;
		}
		bnxt_fw_fatal_close(bp);
	}

	iflib_request_reset(bp->ctx);
	bnxt_stop(bp->ctx);
	bnxt_hwrm_func_drv_unrgtr(bp, false);

	/* P5+ chips interrupt via NQs; older chips via RX CP rings. */
	for (i = bp->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5_PLUS(bp))
			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
		else
			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);

	}
	if (pci_is_enabled(bp->pdev))
		pci_disable_device(bp->pdev);
	pci_disable_busmaster(bp->dev);
	bnxt_free_ctx_mem(bp);
}
1726
1727
static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
1728
{
1729
struct bnxt_fw_health *fw_health = bp->fw_health;
1730
bool no_heartbeat = false, has_reset = false;
1731
u32 val;
1732
1733
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
1734
if (val == fw_health->last_fw_heartbeat)
1735
no_heartbeat = true;
1736
1737
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1738
if (val != fw_health->last_fw_reset_cnt)
1739
has_reset = true;
1740
1741
if (!no_heartbeat && has_reset)
1742
return true;
1743
1744
return false;
1745
}
1746
1747
/*
 * Initiate a firmware reset from slow-path context.  Only acts when the
 * device is open and no reset is already in flight.  Closes the device,
 * chooses the next reset state and poll interval based on whether the
 * firmware supports error-recovery reload, and queues the reset task.
 */
void bnxt_fw_reset(struct bnxt_softc *bp)
{
	bnxt_rtnl_lock_sp(bp);
	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		int tmo;
		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_fw_reset_close(bp);

		if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			/* Honor the firmware's minimum down time (deciseconds). */
			tmo = bp->fw_reset_min_dsecs * HZ /10;
		}
		bnxt_queue_fw_reset_work(bp, tmo);
	}
	bnxt_rtnl_unlock_sp(bp);
}
1767
1768
static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
1769
{
1770
if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1771
return;
1772
1773
if (BNXT_PF(bp))
1774
queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1775
else
1776
schedule_delayed_work(&bp->fw_reset_task, delay);
1777
}
1778
1779
void bnxt_queue_sp_work(struct bnxt_softc *bp)
1780
{
1781
if (BNXT_PF(bp))
1782
queue_work(bnxt_pf_wq, &bp->sp_task);
1783
else
1784
schedule_work(&bp->sp_task);
1785
}
1786
1787
/*
 * Execute one step of the firmware-advertised host reset sequence:
 * write the step's value to its register (config space, GRC via window
 * 2, BAR0, or BAR1) and then wait the step's delay.  The config-space
 * read before msleep() flushes the posted write to the device.
 */
static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
	u32 reg_type, reg_off, delay_msecs;

	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	switch (reg_type) {
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_write_config_dword(bp->pdev, reg_off, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		/* Point GRC window 2 at the register's base, then fall
		 * through and write through the window in BAR0.
		 */
		writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
		fallthrough;
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		writel_fbsd(bp, reg_off, 0, val);
		break;
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		writel_fbsd(bp, reg_off, 2, val);
		break;
	}
	if (delay_msecs) {
		/* Config read acts as a flush of the posted write above. */
		pci_read_config_dword(bp->pdev, 0, &val);
		msleep(delay_msecs);
	}
}
1817
1818
/*
 * Perform the actual firmware reset, using whichever method the firmware
 * advertised: no-op (firmware reloads itself on error-recovery-capable
 * devices), a host-driven register write sequence, or a graceful
 * HWRM_FW_RESET request to the co-processor.  Records the reset
 * timestamp in all cases.
 */
static void bnxt_reset_all(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int i, rc;

	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
		bp->fw_reset_timestamp = jiffies;
		return;
	}

	if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
		/* Host-driven reset: replay the advertised write sequence. */
		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
			bnxt_fw_reset_writel(bp, i);
	} else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
		struct hwrm_fw_reset_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
		req.target_id = htole16(HWRM_TARGET_ID_KONG);
		req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
		req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
		rc = hwrm_send_message(bp, &req, sizeof(req));

		/*
		 * NOTE(review): this also prints when rc == 0; presumably a
		 * successful graceful reset still fails the HWRM exchange,
		 * so -ENODEV is the only "expected" result — confirm.
		 */
		if (rc != -ENODEV)
			device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
	}
	bp->fw_reset_timestamp = jiffies;
}
1846
1847
static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
1848
{
1849
if (bp->fw_health)
1850
return 0;
1851
1852
bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
1853
if (!bp->fw_health)
1854
return -ENOMEM;
1855
1856
mutex_init(&bp->fw_health->lock);
1857
return 0;
1858
}
1859
1860
static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
1861
{
1862
int rc;
1863
1864
if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
1865
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
1866
return 0;
1867
1868
rc = __bnxt_alloc_fw_health(bp);
1869
if (rc) {
1870
bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
1871
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
1872
return rc;
1873
}
1874
1875
return 0;
1876
}
1877
1878
/* Point the firmware-health GRC window at the 'reg' base address. */
static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
{
	writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
}
1882
1883
/*
 * Pre-map the GRC-type firmware health registers through a shared GRC
 * window so later reads are cheap.  All GRC health registers must share
 * the same base page; returns -ERANGE otherwise.  Marks health status /
 * reset counters reliable once mapping succeeds.
 */
static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	bp->fw_health->status_reliable = false;
	bp->fw_health->resets_reliable = false;
	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			/* All GRC health registers must share one window. */
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	bp->fw_health->status_reliable = true;
	bp->fw_health->resets_reliable = true;
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}
1911
1912
static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
1913
{
1914
struct bnxt_fw_health *fw_health = bp->fw_health;
1915
u32 reg_type;
1916
1917
if (!fw_health)
1918
return;
1919
1920
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
1921
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1922
fw_health->status_reliable = false;
1923
1924
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
1925
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
1926
fw_health->resets_reliable = false;
1927
}
1928
1929
/*
 * Query the firmware's error-recovery configuration (HWRM_ERROR_RECOVERY_QCFG)
 * and populate bp->fw_health: polling/wait intervals, monitoring register
 * addresses, and the host-driven reset register sequence.  On any failure the
 * BNXT_FW_CAP_ERROR_RECOVERY capability is cleared so the driver stops
 * attempting recovery.  Returns 0 on success or a negative errno.
 */
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp =
	    (void *)bp->hwrm_cmd_resp.idi_vaddr;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	/* Nothing to query if FW did not advertise error recovery. */
	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
	rc = _hwrm_send_message(bp, &req, sizeof(req));

	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32toh(resp->flags);
	/* CO_CPU recovery needs the KONG mailbox channel to talk to FW. */
	if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	/* All intervals below are in deci-seconds, as reported by FW. */
	fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
	    le32toh(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
	    le32toh(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
	    le32toh(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
	    le32toh(resp->max_bailout_time_after_reset);
	/* Monitoring register addresses (type encoded in high bits). */
	fw_health->regs[BNXT_FW_HEALTH_REG] =
	    le32toh(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
	    le32toh(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
	    le32toh(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
	    le32toh(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
	    le32toh(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	/* NOTE(review): 16 is presumably the fw_reset_seq_regs[] capacity
	 * (BNXT_FW_MAX_RESET_SEQ upstream) — confirm against the header. */
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	/* Host-driven reset: register/value/delay triplets to write in order. */
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
		    le32toh(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
		    le32toh(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
		    le32toh(resp->delay_after_reset[i]);
	}
err_recovery_out:
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}
1990
1991
/*
 * Register the driver with the firmware.  Error-recovery support is probed
 * first (allocation + HWRM query) because it must be known before
 * registration; failures there are logged but deliberately non-fatal.
 * Returns 0 on success, -ENODEV if the FW registration itself fails.
 */
static int bnxt_drv_rgtr(struct bnxt_softc *bp)
{
	int rc;

	/* determine whether we can support error recovery before
	 * registering with FW
	 */
	if (bnxt_alloc_fw_health(bp)) {
		device_printf(bp->dev, "no memory for firmware error recovery\n");
	} else {
		rc = bnxt_hwrm_error_recovery_qcfg(bp);
		if (rc)
			device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
				      rc);
	}
	/* NOTE(review): async event bitmap and VF count are not passed here
	 * (NULL, 0) — confirm whether default FW event registration suffices. */
	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
	if (rc)
		return -ENODEV;
	return 0;
}
2011
2012
static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
2013
{
2014
return time_after(jiffies, bp->fw_reset_timestamp +
2015
(bp->fw_reset_max_dsecs * HZ / 10));
2016
}
2017
2018
/*
 * Re-initialize the device after a firmware reset: re-query queue config,
 * capabilities and function config, re-register with FW, re-allocate context
 * memory, then bring the iflib interface back up and restart the ULP (RoCE)
 * driver if needed.  Returns 0 on success or a negative/HWRM error code.
 */
static int bnxt_open(struct bnxt_softc *bp)
{
	int rc = 0;
	/* NOTE(review): the rc from this NVM query is overwritten by the
	 * qportcfg call below, so an NVM failure is silently ignored —
	 * confirm that is intentional. */
	if (BNXT_PF(bp))
		rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
		    &bp->nvm_info->device_id, &bp->nvm_info->sector_size,
		    &bp->nvm_info->size, &bp->nvm_info->reserved_size,
		    &bp->nvm_info->available_size);

	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
		return rc;
	}
	if (bp->is_asym_q) {
		/* Asymmetric queues: RX path has its own queue config. */
		rc = bnxt_hwrm_queue_qportcfg(bp,
		    HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			device_printf(bp->dev, "re-init: hwrm qportcfg (rx) failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(bp);
	} else {
		/* Symmetric queues: RX mirrors the TX queue configuration. */
		bp->rx_max_q = bp->tx_max_q;
		memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
		memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
	}
	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	/* Register the driver with the FW */
	rc = bnxt_drv_rgtr(bp);
	if (rc)
		return rc;
	/* Backing-store context memory exists on HWRM spec >= 1.8.3. */
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc) {
			device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	if (BNXT_CHIP_P5_PLUS(bp))
		bnxt_hwrm_reserve_pf_rings(bp);
	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(bp);
	if (rc) {
		device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
		return rc;
	}

	/* Re-plumb interrupts and bring the iflib interface up. */
	bnxt_msix_intr_assign(bp->ctx, 0);
	bnxt_init(bp->ctx);
	bnxt_intr_enable(bp->ctx);

	/* If a FW reset was detected (not initiated by us), restart the
	 * ULP (aux/RoCE) driver now that the device is back. */
	if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
			bnxt_ulp_start(bp, 0);
		}
	}

	device_printf(bp->dev, "Network interface is UP and operational\n");

	return rc;
}
2089
static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
2090
{
2091
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
2092
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
2093
bnxt_ulp_start(bp, rc);
2094
}
2095
bp->fw_reset_state = 0;
2096
}
2097
2098
/*
 * Deferred-work state machine that drives a firmware reset to completion:
 *
 *   POLL_FW_DOWN -> RESET_FW -> ENABLE_DEV -> POLL_FW -> OPENING
 *
 * Each state either advances (falling through to the next case), re-queues
 * itself with a delay to poll again, or aborts on timeout.  Only runs while
 * BNXT_STATE_IN_FW_RESET is set.
 */
static void bnxt_fw_reset_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
	int rc = 0;

	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
		return;
	}

	switch (bp->fw_reset_state) {
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		/* Wait (poll at 200ms) for FW to report shutdown, up to the
		 * FW-provided maximum. */
		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !bnxt_fw_reset_timeout(bp)) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		/* Non-primary functions never drive the reset themselves;
		 * they wait for the primary and go straight to ENABLE_DEV. */
		if (!bp->fw_health->primary) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
	fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		/* Primary function performs the actual reset, then waits the
		 * FW-mandated minimum before touching the device again. */
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		bnxt_inv_fw_health_reg(bp);
		/* After a fatal reset with no mandated wait, poll config
		 * space (1ms) until the device reappears on the bus. */
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
		    !bp->fw_reset_min_dsecs) {
			u16 val;

			val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
			if (val == 0xffff) {	/* all-ones: device not back yet */
				if (bnxt_fw_reset_timeout(bp)) {
					device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
					rc = -ETIMEDOUT;
					goto fw_reset_abort;
				}
				bnxt_queue_fw_reset_work(bp, HZ / 1000);
				return;
			}
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
		if (!pci_is_enabled(bp->pdev)) {
			if (pci_enable_device(bp->pdev)) {
				device_printf(bp->dev, "Cannot re-enable PCI device\n");
				rc = -ENODEV;
				goto fw_reset_abort;
			}
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		/* Use a short HWRM timeout while polling for FW readiness. */
		bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
		rc = bnxt_hwrm_poll(bp);
		if (rc) {
			if (bnxt_fw_reset_timeout(bp)) {
				device_printf(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		rc = bnxt_open(bp);
		if (rc) {
			device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
			/* NOTE(review): rtnl_unlock() without a visible
			 * rtnl_lock() in this function — presumably the lock
			 * is taken before entering OPENING; confirm pairing. */
			bnxt_fw_reset_abort(bp, rc);
			rtnl_unlock();
			return;
		}

		/* Record the post-reset reset counter as the new baseline. */
		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
			    bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Ensure state writes are visible before clearing the bit. */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, 0);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		set_bit(BNXT_STATE_OPEN, &bp->state);
		rtnl_unlock();
	}
	return;

fw_reset_abort_status:
	/* Log the FW health status to aid post-mortem of the failed reset. */
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	rtnl_lock();
	bnxt_fw_reset_abort(bp, rc);
	rtnl_unlock();
}
2213
2214
/*
 * Kick off a forced firmware reset after a fatal condition.  The primary
 * function proceeds to RESET_FW (immediately for CO_CPU-driven recovery);
 * non-primary functions record a deadline and wait in ENABLE_DEV for the
 * primary to finish.  No-op if the device is not open or a reset is
 * already in progress.
 */
static void bnxt_force_fw_reset(struct bnxt_softc *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 wait_dsecs;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return;
	bnxt_fw_reset_close(bp);
	wait_dsecs = fw_health->master_func_wait_dsecs;
	if (fw_health->primary) {
		/* CO_CPU recovery: firmware resets itself, no delay needed. */
		if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
			wait_dsecs = 0;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	} else {
		/* Deadline by which the primary should have reset the FW. */
		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
		wait_dsecs = fw_health->normal_func_wait_dsecs;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
	}

	/* Min/max post-reset waits come from the FW recovery config. */
	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
}
2238
2239
/*
 * Handle a detected firmware fatal condition: mark the fatal state (must
 * happen before the reset path consults it), then force a FW reset under
 * the rtnl/sp lock.
 */
static void bnxt_fw_exception(struct bnxt_softc *bp)
{
	device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
	bnxt_rtnl_lock_sp(bp);
	bnxt_force_fw_reset(bp);
	bnxt_rtnl_unlock_sp(bp);
}
2247
2248
static void __bnxt_fw_recover(struct bnxt_softc *bp)
2249
{
2250
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
2251
test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
2252
bnxt_fw_reset(bp);
2253
else
2254
bnxt_fw_exception(bp);
2255
}
2256
2257
static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
2258
{
2259
struct bnxt_fw_health *fw_health = bp->fw_health;
2260
2261
if (!fw_health)
2262
return;
2263
2264
if (!fw_health->fw_reporter) {
2265
__bnxt_fw_recover(bp);
2266
return;
2267
}
2268
}
2269
2270
/*
 * Slow-path work handler.  Marks itself in-progress via
 * BNXT_STATE_IN_SP_TASK (with memory barriers so other CPUs observe the
 * flag relative to the sp_event bits), then services queued FW reset /
 * exception events.  Bails out early if the device is not open.
 */
static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	/* Publish IN_SP_TASK before reading state/sp_event below. */
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	/* FW requested a reset: report through health infra if a fatal or
	 * non-fatal condition is flagged, else do a plain FW reset. */
	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
		    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
			bnxt_devlink_health_fw_report(bp);
		else
			bnxt_fw_reset(bp);
	}

	/* FW exception event: only act if FW really looks unhealthy. */
	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
		if (!is_bnxt_fw_ok(bp))
			bnxt_devlink_health_fw_report(bp);
	}
	/* Order event processing before dropping IN_SP_TASK. */
	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
2296
2297
/* Device setup and teardown */
2298
/*
 * First-stage iflib attach: map PCI resources, set up the HWRM channel,
 * identify the chip generation, register with firmware, size the iflib
 * queues/capabilities, allocate the default completion ring, and create
 * sysctls.  Cleanup on failure unwinds through the chain of labels at the
 * bottom in reverse order of acquisition.
 */
static int
bnxt_attach_pre(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	int rc = 0;

	softc->ctx = ctx;
	softc->dev = iflib_get_dev(ctx);
	softc->media = iflib_get_media(ctx);
	softc->scctx = iflib_get_softc_ctx(ctx);
	softc->sctx = iflib_get_sctx(ctx);
	scctx = softc->scctx;

	/* TODO: Better way of detecting NPAR/VF is needed */
	switch (pci_get_device(softc->dev)) {
	case BCM57402_NPAR:
	case BCM57404_NPAR:
	case BCM57406_NPAR:
	case BCM57407_NPAR:
	case BCM57412_NPAR1:
	case BCM57412_NPAR2:
	case BCM57414_NPAR1:
	case BCM57414_NPAR2:
	case BCM57416_NPAR1:
	case BCM57416_NPAR2:
	case BCM57504_NPAR:
		softc->flags |= BNXT_FLAG_NPAR;
		break;
	case NETXTREME_C_VF1:
	case NETXTREME_C_VF2:
	case NETXTREME_C_VF3:
	case NETXTREME_E_VF1:
	case NETXTREME_E_VF2:
	case NETXTREME_E_VF3:
		softc->flags |= BNXT_FLAG_VF;
		break;
	}

	/* PCI location, used for identification and the Linux shim below. */
	softc->domain = pci_get_domain(softc->dev);
	softc->bus = pci_get_bus(softc->dev);
	softc->slot = pci_get_slot(softc->dev);
	softc->function = pci_get_function(softc->dev);
	softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);

	/* Track all attached instances on a global list. */
	if (bnxt_num_pfs == 0)
		SLIST_INIT(&pf_list);
	bnxt_num_pfs++;
	softc->list.softc = softc;
	SLIST_INSERT_HEAD(&pf_list, &softc->list, next);

	pci_enable_busmaster(softc->dev);

	if (bnxt_pci_mapping(softc)) {
		device_printf(softc->dev, "PCI mapping failed\n");
		rc = ENXIO;
		goto pci_map_fail;
	}

	/* LinuxKPI pci_dev shim so Linux-derived code paths work. */
	softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
	if (!softc->pdev) {
		device_printf(softc->dev, "pdev alloc failed\n");
		rc = -ENOMEM;
		goto free_pci_map;
	}

	rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
	if (rc) {
		device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
		goto pci_attach_fail;
	}

	/* HWRM setup/init */
	BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
	rc = bnxt_alloc_hwrm_dma_mem(softc);
	if (rc)
		goto dma_fail;

	/* Get firmware version and compare with driver */
	softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (softc->ver_info == NULL) {
		rc = ENOMEM;
		device_printf(softc->dev,
		    "Unable to allocate space for version info\n");
		goto ver_alloc_fail;
	}
	/* Default minimum required HWRM version */
	softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
	softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
	softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_ver_get(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm ver get failed\n");
		goto ver_fail;
	}

	/* Now perform a function reset */
	/* NOTE(review): rc from this func_reset is not checked before being
	 * overwritten below — confirm failure here is intentionally ignored. */
	rc = bnxt_hwrm_func_reset(softc);

	if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
	    softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
		rc = bnxt_alloc_hwrm_short_cmd_req(softc);
		if (rc)
			goto hwrm_short_cmd_alloc_fail;
	}

	/* Identify the chip generation (P5 = Thor, P7 = Thor2). */
	if ((softc->ver_info->chip_num == BCM57508) ||
	    (softc->ver_info->chip_num == BCM57504) ||
	    (softc->ver_info->chip_num == BCM57504_NPAR) ||
	    (softc->ver_info->chip_num == BCM57502) ||
	    (softc->ver_info->chip_num == BCM57601) ||
	    (softc->ver_info->chip_num == BCM57602) ||
	    (softc->ver_info->chip_num == BCM57604))
		softc->flags |= BNXT_FLAG_CHIP_P5;

	if (softc->ver_info->chip_num == BCM57608)
		softc->flags |= BNXT_FLAG_CHIP_P7;

	softc->flags |= BNXT_FLAG_TPA;

	/* NOTE(review): the A0-stepping check below is redundant — the
	 * unconditional P5_PLUS clear right after it disables TPA for all
	 * P5+ chips anyway; confirm which behavior is intended. */
	if (BNXT_CHIP_P5_PLUS(softc) && (!softc->ver_info->chip_rev) &&
	    (!softc->ver_info->chip_metal))
		softc->flags &= ~BNXT_FLAG_TPA;

	if (BNXT_CHIP_P5_PLUS(softc))
		softc->flags &= ~BNXT_FLAG_TPA;

	/* Get NVRAM info */
	if (BNXT_PF(softc)) {
		/* Shared single-threaded workqueue for all PF instances. */
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
			    create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				device_printf(softc->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto nvm_alloc_fail;
			}
		}

		softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (softc->nvm_info == NULL) {
			rc = ENOMEM;
			device_printf(softc->dev,
			    "Unable to allocate space for NVRAM info\n");
			goto nvm_alloc_fail;
		}

		rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
		    &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
		    &softc->nvm_info->size, &softc->nvm_info->reserved_size,
		    &softc->nvm_info->available_size);
	}

	/* Select doorbell ops for the chip generation. */
	if (BNXT_CHIP_P5(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
	} else if (BNXT_CHIP_P7(softc)) {
		softc->db_ops.bnxt_db_tx = bnxt_thor2_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_thor2_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_thor2_db_rx_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_thor2_db_tx_cq;
		softc->db_ops.bnxt_db_nq = bnxt_thor2_db_nq;
	} else {
		softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
		softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
		softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
		softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
	}


	/* Get the queue config */
	rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
		goto failed;
	}
	if (softc->is_asym_q) {
		rc = bnxt_hwrm_queue_qportcfg(softc,
		    HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
		if (rc) {
			/* NOTE(review): this returns directly instead of
			 * `goto failed` like the sibling error paths, so no
			 * cleanup runs — confirm whether that is deliberate. */
			device_printf(softc->dev, "attach: hwrm qportcfg (rx) failed\n");
			return rc;
		}
		bnxt_verify_asym_queues(softc);
	} else {
		softc->rx_max_q = softc->tx_max_q;
		memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
		memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
	}

	/* Get the HW capabilities */
	rc = bnxt_hwrm_func_qcaps(softc);
	if (rc)
		goto failed;

	/*
	 * Register the driver with the FW
	 * Register the async events with the FW
	 */
	rc = bnxt_drv_rgtr(softc);
	if (rc)
		goto failed;

	if (softc->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(softc);
		if (rc) {
			/* NOTE(review): direct return, bypasses cleanup —
			 * see note above; confirm intent. */
			device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
			return rc;
		}
		rc = bnxt_hwrm_func_resc_qcaps(softc, true);
		if (!rc)
			softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
	}

	/* Get the current configuration of this function */
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	iflib_set_mac(ctx, softc->func.mac_addr);

	/* Advertise checksum/TSO/VLAN capabilities to iflib. */
	scctx->isc_txrx = &bnxt_txrx;
	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
	scctx->isc_capabilities = scctx->isc_capenable =
	    /* These are translated to hwassit bits */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
	    /* These are checked by iflib */
	    IFCAP_LRO | IFCAP_VLAN_HWFILTER |
	    /* These are part of the iflib mask */
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
	    /* These likely get lost... */
	    IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;

	if (bnxt_wol_supported(softc))
		scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
	bnxt_get_wol_settings(softc);
	if (softc->wol)
		scctx->isc_capenable |= IFCAP_WOL_MAGIC;

	/* Get the queue config */
	/* NOTE(review): bnxt_get_wol_settings() is called a second time here
	 * and the comment above looks copy-pasted — likely redundant. */
	bnxt_get_wol_settings(softc);
	if (BNXT_CHIP_P5_PLUS(softc))
		bnxt_hwrm_reserve_pf_rings(softc);
	rc = bnxt_hwrm_func_qcfg(softc);
	if (rc) {
		device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
		goto failed;
	}

	bnxt_clear_ids(softc);
	if (rc)
		goto failed;

	/* Now set up iflib sc */
	/* NOTE(review): trailing comma operator below (',' not ';') — works,
	 * but presumably a typo worth fixing separately. */
	scctx->isc_tx_nsegments = 31,
	scctx->isc_tx_tso_segments_max = 31;
	scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
	scctx->isc_vectors = softc->func.max_cp_rings;
	scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
	scctx->isc_txrx = &bnxt_txrx;

	if (scctx->isc_nrxd[0] <
	    ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
		device_printf(softc->dev,
		    "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n",
		    scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
	if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
		device_printf(softc->dev,
		    "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). Driver may be unstable\n",
		    scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
	/* Ring byte sizes: [0]=completion, [1]=descriptor, [2]=agg/cmd. */
	scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
	scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
	    scctx->isc_ntxd[1];
	scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
	scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
	scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
	    scctx->isc_nrxd[1];
	scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
	    scctx->isc_nrxd[2];

	/* Queue-set maxima bounded by MSI-X vectors and FW allocations
	 * (one completion ring and vector reserved for the default ring). */
	scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
	    softc->fn_qcfg.alloc_completion_rings - 1);
	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
	    softc->fn_qcfg.alloc_rx_rings);
	scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
	    softc->fn_qcfg.alloc_vnics);
	scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
	    softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);

	scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;

	/* iflib will map and release this bar */
	scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);

	/*
	 * Default settings for HW LRO (TPA):
	 * Disable HW LRO by default
	 * Can be enabled after taking care of 'packet forwarding'
	 */
	if (softc->flags & BNXT_FLAG_TPA) {
		softc->hw_lro.enable = 0;
		softc->hw_lro.is_mode_gro = 0;
		softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
		softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
		softc->hw_lro.min_agg_len = 512;
	}

	/* Allocate the default completion ring */
	softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	softc->def_cp_ring.ring.softc = softc;
	softc->def_cp_ring.ring.id = 0;
	softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
	    softc->legacy_db_size : softc->def_cp_ring.ring.id * 0x80;
	softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
	    sizeof(struct cmpl_base);
	softc->def_cp_ring.ring.db_ring_mask = softc->def_cp_ring.ring.ring_size -1 ;
	/* NOTE(review): rc from this DMA alloc is overwritten by
	 * bnxt_init_sysctl_ctx() below without being checked — confirm. */
	rc = iflib_dma_alloc(ctx,
	    sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
	    &softc->def_cp_ring_mem, 0);
	softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
	softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
	iflib_config_task_init(ctx, &softc->def_cp_task, bnxt_def_cp_task);

	rc = bnxt_init_sysctl_ctx(softc);
	if (rc)
		goto init_sysctl_failed;
	if (BNXT_PF(softc)) {
		rc = bnxt_create_nvram_sysctls(softc->nvm_info);
		if (rc)
			goto failed;
	}

	/* Random RSS key; hash over IPv4/IPv6 TCP and UDP flows. */
	arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
	softc->vnic_info.rss_hash_type =
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
	    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
	rc = bnxt_create_config_sysctls_pre(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_hw_lro_sysctls(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_pause_fc_sysctls(softc);
	if (rc)
		goto failed;

	rc = bnxt_create_dcb_sysctls(softc);
	if (rc)
		goto failed;

	set_bit(BNXT_STATE_OPEN, &softc->state);
	INIT_WORK(&softc->sp_task, bnxt_sp_task);
	INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);

	/* Initialize the vlan list */
	SLIST_INIT(&softc->vnic_info.vlan_tags);
	softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
	softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
	    M_WAITOK|M_ZERO);

	/* Board part number from PCI VPD (PF only, best-effort). */
	if (BNXT_PF(softc)) {
		const char *part_num;

		if (pci_get_vpd_readonly(softc->dev, "PN", &part_num) == 0)
			snprintf(softc->board_partno, sizeof(softc->board_partno), "%s", part_num);
	}

	return (rc);

failed:
	bnxt_free_sysctl_ctx(softc);
init_sysctl_failed:
	bnxt_hwrm_func_drv_unrgtr(softc, false);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);
nvm_alloc_fail:
	bnxt_free_hwrm_short_cmd_req(softc);
hwrm_short_cmd_alloc_fail:
ver_fail:
	free(softc->ver_info, M_DEVBUF);
ver_alloc_fail:
	bnxt_free_hwrm_dma_mem(softc);
dma_fail:
	BNXT_HWRM_LOCK_DESTROY(softc);
	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
pci_attach_fail:
	kfree(softc->pdev);
	softc->pdev = NULL;
free_pci_map:
	bnxt_pci_mapping_free(softc);
pci_map_fail:
	pci_disable_busmaster(softc->dev);
	return (rc);
}
2712
2713
/*
 * Second-stage iflib attach, run after interrupt/queue setup: probe the
 * PHY, populate media types, size the RX buffer from the MTU, and
 * initialize DCB and the RDMA auxiliary device.  Returns 0 on success or
 * the PHY probe error.
 */
static int
bnxt_attach_post(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int rc;

	softc->ifp = ifp;
	bnxt_create_config_sysctls_post(softc);

	/* Update link state etc... */
	rc = bnxt_probe_phy(softc);
	if (rc)
		goto failed;

	/* Needs to be done after probing the phy */
	bnxt_create_ver_sysctls(softc);
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);

	softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
	    ETHER_CRC_LEN;

	/* RX buffers never exceed one page. */
	softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
	bnxt_dcb_init(softc);
	bnxt_rdma_aux_device_init(softc);

	/* Success path falls through here with rc == 0. */
failed:
	return rc;
}
2744
2745
/*
 * iflib detach: tear everything down in reverse order of attach — stop
 * deferred work, reset the function in FW, release interrupts and DMA
 * memory, unregister from FW, and unmap PCI resources.  The ordering is
 * significant (e.g. work must be cancelled before freeing what it uses).
 */
static int
bnxt_detach(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_vlan_tag *tag;
	struct bnxt_vlan_tag *tmp;
	int i;

	bnxt_rdma_aux_device_uninit(softc);
	/* Quiesce deferred work before freeing anything it touches. */
	cancel_delayed_work_sync(&softc->fw_reset_task);
	cancel_work_sync(&softc->sp_task);
	bnxt_dcb_free(softc);
	SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
	bnxt_num_pfs--;
	bnxt_wol_config(ctx);
	bnxt_do_disable_intr(&softc->def_cp_ring);
	bnxt_free_sysctl_ctx(softc);
	bnxt_hwrm_func_reset(softc);
	bnxt_free_ctx_mem(softc);
	bnxt_clear_ids(softc);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	/* We need to free() these here... */
	/* Per-queue IRQs: P5+ chips use NQ rings, older chips RX CP rings. */
	for (i = softc->nrxqsets-1; i>=0; i--) {
		if (BNXT_CHIP_P5_PLUS(softc))
			iflib_irq_free(ctx, &softc->nq_rings[i].irq);
		else
			iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);

	}
	iflib_dma_free(&softc->vnic_info.mc_list);
	iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
	iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
	if (softc->vnic_info.vlan_tag_list.idi_vaddr)
		iflib_dma_free(&softc->vnic_info.vlan_tag_list);
	SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
		free(tag, M_DEVBUF);
	iflib_dma_free(&softc->def_cp_ring_mem);
	for (i = 0; i < softc->nrxqsets; i++)
		free(softc->rx_rings[i].tpa_start, M_DEVBUF);
	free(softc->ver_info, M_DEVBUF);
	if (BNXT_PF(softc))
		free(softc->nvm_info, M_DEVBUF);

	bnxt_hwrm_func_drv_unrgtr(softc, false);
	bnxt_free_hwrm_dma_mem(softc);
	bnxt_free_hwrm_short_cmd_req(softc);
	BNXT_HWRM_LOCK_DESTROY(softc);

	/* Last PF gone: tear down the shared workqueue. */
	if (!bnxt_num_pfs && bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);

	if (softc->pdev)
		linux_pci_detach_device(softc->pdev);
	free(softc->state_bv, M_DEVBUF);
	pci_disable_busmaster(softc->dev);
	bnxt_pci_mapping_free(softc);

	return 0;
}
2804
2805
/*
 * Release all FW-side resources owned by this function via HWRM: the
 * default completion ring, per-TX-queue rings/stat contexts, filters,
 * VNIC and its RSS context, and per-RX-queue group/AGG/RX/CP (and on P5+
 * NQ) rings.  Frees happen in dependency order; the first failure aborts
 * the remainder (best-effort teardown, no error returned).
 */
static void
bnxt_hwrm_resource_free(struct bnxt_softc *softc)
{
	int i, rc = 0;

	rc = bnxt_hwrm_ring_free(softc,
	    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &softc->def_cp_ring.ring,
	    (uint16_t)HWRM_NA_SIGNATURE);
	if (rc)
		goto fail;

	/* TX side: ring, then its completion ring, then its stat context. */
	for (i = 0; i < softc->ntxqsets; i++) {
		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    &softc->tx_rings[i],
		    softc->tx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->tx_cp_rings[i].ring,
		    (uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
		if (rc)
			goto fail;
	}
	/* Filters and VNIC must go before the rings they reference. */
	rc = bnxt_hwrm_free_filter(softc);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* RX side: group, AGG ring, RX ring, CP ring, (NQ on P5+), stats. */
	for (i = 0; i < softc->nrxqsets; i++) {
		rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
		    &softc->ag_rings[i],
		    (uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
		    &softc->rx_rings[i],
		    softc->rx_cp_rings[i].ring.phys_id);
		if (rc)
			goto fail;

		rc = bnxt_hwrm_ring_free(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->rx_cp_rings[i].ring,
		    (uint16_t)HWRM_NA_SIGNATURE);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc)) {
			rc = bnxt_hwrm_ring_free(softc,
			    HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
			    &softc->nq_rings[i].ring,
			    (uint16_t)HWRM_NA_SIGNATURE);
			if (rc)
				goto fail;
		}

		rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
		if (rc)
			goto fail;
	}

fail:
	return;
}
2891
2892
2893
/*
 * Reset the function's hardware state.  Pre-P5 chips accept a direct
 * firmware function reset; P5+ chips instead have the driver free each
 * allocated resource individually via bnxt_hwrm_resource_free().
 */
static void
bnxt_func_reset(struct bnxt_softc *softc)
{

	if (BNXT_CHIP_P5_PLUS(softc)) {
		bnxt_hwrm_resource_free(softc);
		return;
	}

	bnxt_hwrm_func_reset(softc);
}
2905
2906
/*
 * Populate the VNIC RSS indirection table.  All HW_HASH_INDEX_SIZE slots
 * are filled round-robin from the RX queues.  On P5+ chips each queue
 * consumes two consecutive slots (RX ring id, then its completion ring
 * id); older chips store one ring-group id per slot.
 */
static void
bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
{
	uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
	int i, j;

	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
		if (BNXT_CHIP_P5_PLUS(softc)) {
			/* Two entries per queue: RX ring then its CP ring. */
			rgt[i++] = htole16(softc->rx_rings[j].phys_id);
			rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
		} else {
			rgt[i] = htole16(softc->grp_info[j].grp_id);
		}
		/* Wrap back to queue 0 after the last RX queue. */
		if (++j == softc->nrxqsets)
			j = 0;
	}
}
2923
2924
static void bnxt_get_port_module_status(struct bnxt_softc *softc)
2925
{
2926
struct bnxt_link_info *link_info = &softc->link_info;
2927
struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
2928
uint8_t module_status;
2929
2930
if (bnxt_update_link(softc, false))
2931
return;
2932
2933
module_status = link_info->module_status;
2934
switch (module_status) {
2935
case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX:
2936
case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN:
2937
case HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG:
2938
device_printf(softc->dev, "Unqualified SFP+ module detected on port %d\n",
2939
softc->pf.port_id);
2940
if (softc->hwrm_spec_code >= 0x10201) {
2941
device_printf(softc->dev, "Module part number %s\n",
2942
resp->phy_vendor_partnumber);
2943
}
2944
if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX)
2945
device_printf(softc->dev, "TX is disabled\n");
2946
if (module_status == HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN)
2947
device_printf(softc->dev, "SFP+ module is shutdown\n");
2948
}
2949
}
2950
2951
static void bnxt_aux_dev_free(struct bnxt_softc *softc)
2952
{
2953
kfree(softc->aux_dev);
2954
softc->aux_dev = NULL;
2955
}
2956
2957
static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
2958
{
2959
struct bnxt_aux_dev *bnxt_adev;
2960
2961
msleep(1000 * 2);
2962
bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
2963
if (!bnxt_adev)
2964
return ERR_PTR(-ENOMEM);
2965
2966
return bnxt_adev;
2967
}
2968
2969
static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
2970
{
2971
struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
2972
2973
/* Skip if no auxiliary device init was done. */
2974
if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
2975
return;
2976
2977
if (IS_ERR_OR_NULL(bnxt_adev))
2978
return;
2979
2980
bnxt_rdma_aux_device_del(softc);
2981
2982
if (bnxt_adev->id >= 0)
2983
ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);
2984
2985
bnxt_aux_dev_free(softc);
2986
}
2987
2988
/*
 * Create and register the auxiliary device that exposes this port to the
 * RoCE (RDMA) driver.  Failure is non-fatal: Ethernet operation continues
 * without RDMA support.
 */
static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
{
	int rc;

	if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
		return;

	softc->aux_dev = bnxt_aux_dev_init(softc);
	if (IS_ERR_OR_NULL(softc->aux_dev)) {
		device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
		goto skip_aux_init;
	}

	softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
	if (softc->aux_dev->id < 0) {
		device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
		bnxt_aux_dev_free(softc);
		goto skip_aux_init;
	}

	msleep(1000 * 2);
	/* If aux bus init fails, continue with netdev init. */
	rc = bnxt_rdma_aux_device_add(softc);
	if (rc) {
		device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
		msleep(1000 * 2);
		ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
	}
	/*
	 * NOTE(review): this "Added" message also prints when
	 * bnxt_rdma_aux_device_add() failed above, and aux_dev is not
	 * freed on that path — confirm whether that is intentional.
	 */
	device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE \n",
	    __func__, __LINE__, softc->aux_dev->id);
skip_aux_init:
	return;
}
3021
3022
/* Device configuration */
/*
 * iflib init handler: bring the device from reset to a fully configured
 * state.  Allocation order matters and mirrors the firmware interface:
 * default CP ring (pre-P5 only), then per-RX-queue stat context / NQ /
 * CP ring / RX ring / AGG ring / ring group, then async CR, RSS context,
 * VNIC, filters, RSS table, TPA, and finally the per-TX-queue resources.
 * Any failure unwinds everything via bnxt_func_reset().
 */
static void
bnxt_init(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmediareq ifmr;
	int i;
	int rc;

	if (!BNXT_CHIP_P5_PLUS(softc)) {
		/* Older chips: a firmware reset gives us a clean slate. */
		rc = bnxt_hwrm_func_reset(softc);
		if (rc)
			return;
	} else if (softc->is_dev_init) {
		/* P5+ re-init: release current resources first. */
		bnxt_stop(ctx);
	}

	softc->is_dev_init = true;
	bnxt_clear_ids(softc);

	/* P5+ uses NQs for async events instead of a default CP ring. */
	if (BNXT_CHIP_P5_PLUS(softc))
		goto skip_def_cp_ring;
	/* Allocate the default completion ring */
	softc->def_cp_ring.cons = UINT32_MAX;
	softc->def_cp_ring.v_bit = 1;
	bnxt_mark_cpr_invalid(&softc->def_cp_ring);
	rc = bnxt_hwrm_ring_alloc(softc,
	    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &softc->def_cp_ring.ring);
	bnxt_set_db_mask(softc, &softc->def_cp_ring.ring,
	    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
	if (rc)
		goto fail;
skip_def_cp_ring:
	for (i = 0; i < softc->nrxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
		    softc->rx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc)) {
			/* Allocate the NQ */
			softc->nq_rings[i].cons = 0;
			softc->nq_rings[i].raw_cons = 0;
			softc->nq_rings[i].v_bit = 1;
			softc->nq_rings[i].last_idx = UINT32_MAX;
			bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
			rc = bnxt_hwrm_ring_alloc(softc,
			    HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
			    &softc->nq_rings[i].ring);
			bnxt_set_db_mask(softc, &softc->nq_rings[i].ring,
			    HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ);
			if (rc)
				goto fail;

			/* Arm the NQ doorbell immediately. */
			softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
		}
		/* Allocate the completion ring */
		softc->rx_cp_rings[i].cons = UINT32_MAX;
		softc->rx_cp_rings[i].raw_cons = UINT32_MAX;
		softc->rx_cp_rings[i].v_bit = 1;
		softc->rx_cp_rings[i].last_idx = UINT32_MAX;
		softc->rx_cp_rings[i].toggle = 0;
		bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->rx_cp_rings[i].ring);
		bnxt_set_db_mask(softc, &softc->rx_cp_rings[i].ring,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc))
			softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);

		/* Allocate the RX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
		bnxt_set_db_mask(softc, &softc->rx_rings[i],
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);

		/* Allocate the AG ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
		    &softc->ag_rings[i]);
		bnxt_set_db_mask(softc, &softc->ag_rings[i],
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);

		/* Allocate the ring group */
		softc->grp_info[i].stats_ctx =
		    softc->rx_cp_rings[i].stats_ctx_id;
		softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
		softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
		softc->grp_info[i].cp_ring_id =
		    softc->rx_cp_rings[i].ring.phys_id;
		rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
		if (rc)
			goto fail;
	}

	/* And now set the default CP / NQ ring for the async */
	rc = bnxt_cfg_async_cr(softc);
	if (rc)
		goto fail;

	/* Allocate the VNIC RSS context */
	rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
	if (rc)
		goto fail;

	/* Allocate the vnic */
	softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
	softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
	rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
	if (rc)
		goto fail;
	rc = bnxt_hwrm_set_filter(softc);
	if (rc)
		goto fail;

	bnxt_rss_grp_tbl_init(softc);

	rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
	    softc->vnic_info.rss_hash_type);
	if (rc)
		goto fail;

	rc = bnxt_hwrm_vnic_tpa_cfg(softc);
	if (rc)
		goto fail;

	for (i = 0; i < softc->ntxqsets; i++) {
		/* Allocate the statistics context */
		rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
		    softc->tx_stats[i].idi_paddr);
		if (rc)
			goto fail;

		/* Allocate the completion ring */
		softc->tx_cp_rings[i].cons = UINT32_MAX;
		softc->tx_cp_rings[i].raw_cons = UINT32_MAX;
		softc->tx_cp_rings[i].v_bit = 1;
		softc->tx_cp_rings[i].toggle = 0;
		bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &softc->tx_cp_rings[i].ring);
		bnxt_set_db_mask(softc, &softc->tx_cp_rings[i].ring,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
		if (rc)
			goto fail;

		if (BNXT_CHIP_P5_PLUS(softc))
			softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);

		/* Allocate the TX ring */
		rc = bnxt_hwrm_ring_alloc(softc,
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    &softc->tx_rings[i]);
		bnxt_set_db_mask(softc, &softc->tx_rings[i],
		    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX);
		if (rc)
			goto fail;
		softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
	}

	bnxt_do_enable_intr(&softc->def_cp_ring);
	bnxt_get_port_module_status(softc);
	bnxt_media_status(softc->ctx, &ifmr);
	bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
	return;

fail:
	/* Partial bring-up: release everything and invalidate cached ids. */
	bnxt_func_reset(softc);
	bnxt_clear_ids(softc);
	return;
}
3212
3213
static void
3214
bnxt_stop(if_ctx_t ctx)
3215
{
3216
struct bnxt_softc *softc = iflib_get_softc(ctx);
3217
3218
softc->is_dev_init = false;
3219
bnxt_do_disable_intr(&softc->def_cp_ring);
3220
bnxt_func_reset(softc);
3221
bnxt_clear_ids(softc);
3222
return;
3223
}
3224
3225
static u_int
3226
bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3227
{
3228
uint8_t *mta = arg;
3229
3230
if (cnt == BNXT_MAX_MC_ADDRS)
3231
return (1);
3232
3233
bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3234
3235
return (1);
3236
}
3237
3238
static void
3239
bnxt_multi_set(if_ctx_t ctx)
3240
{
3241
struct bnxt_softc *softc = iflib_get_softc(ctx);
3242
if_t ifp = iflib_get_ifp(ctx);
3243
uint8_t *mta;
3244
int mcnt;
3245
3246
mta = softc->vnic_info.mc_list.idi_vaddr;
3247
bzero(mta, softc->vnic_info.mc_list.idi_size);
3248
mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);
3249
3250
if (mcnt > BNXT_MAX_MC_ADDRS) {
3251
softc->vnic_info.rx_mask |=
3252
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3253
bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3254
} else {
3255
softc->vnic_info.rx_mask &=
3256
~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3257
bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
3258
softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
3259
softc->vnic_info.mc_list_count = mcnt;
3260
softc->vnic_info.rx_mask |=
3261
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
3262
if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
3263
device_printf(softc->dev,
3264
"set_multi: rx_mask set failed\n");
3265
}
3266
}
3267
3268
static int
3269
bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
3270
{
3271
struct bnxt_softc *softc = iflib_get_softc(ctx);
3272
3273
if (mtu > BNXT_MAX_MTU)
3274
return EINVAL;
3275
3276
softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3277
softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
3278
return 0;
3279
}
3280
3281
/*
 * Report current media status for ifconfig: link validity/activity,
 * duplex, the active media word (chosen by matching baudrate against the
 * media list built by bnxt_add_media_types()), and RX/TX pause flags.
 */
static void
bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_link_info *link_info = &softc->link_info;
	struct ifmedia_entry *next;
	uint64_t target_baudrate = bnxt_get_baudrate(link_info);
	int active_media = IFM_UNKNOWN;

	/* Refresh cached PHY state from firmware before reporting. */
	bnxt_update_link(softc, true);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report valid-but-inactive and stop. */
	if (!link_info->link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	/*
	 * Go through the list of supported media which got prepared
	 * as part of bnxt_add_media_types() using api ifmedia_add().
	 */
	LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
		if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
			active_media = next->ifm_media;
			break;
		}
	}
	ifmr->ifm_active |= active_media;

	if (link_info->flow_ctrl.rx)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (link_info->flow_ctrl.tx)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	bnxt_report_link(softc);
	return;
}
3324
3325
/*
 * Handle an ifconfig media change request.  Forced (non-autoneg) subtypes
 * clear BNXT_AUTONEG_SPEED and select the firmware force-speed encoding:
 * the legacy support_speeds field is preferred, falling back to the newer
 * speeds2 fields on devices that advertise them, with flags recording the
 * required signalling (NRZ, PAM4, PAM4-56, PAM4-112).  IFM_AUTO and the
 * BASE-T autoneg subtypes instead set BNXT_AUTONEG_SPEED.  The resulting
 * configuration is pushed to firmware and the media status refreshed.
 */
static int
bnxt_media_change(if_ctx_t ctx)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ifmediareq ifmr;
	int rc;
	struct bnxt_link_info *link_info = &softc->link_info;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_100_T:
		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
		link_info->req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
		break;
	case IFM_1000_KX:
	case IFM_1000_SGMII:
	case IFM_1000_CX:
	case IFM_1000_SX:
	case IFM_1000_LX:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB;
			link_info->force_speed2_nrz = true;
		}

		break;

	case IFM_2500_KX:
	case IFM_2500_T:
		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
		link_info->req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
		break;
	case IFM_10G_CR1:
	case IFM_10G_KR:
	case IFM_10G_LR:
	case IFM_10G_SR:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		/*
		 * NOTE(review): the 10G/25G/40G cases mask support_speeds
		 * with *_CFG_INPUT_FORCE_* constants while other cases use
		 * *_QCFG_OUTPUT_SUPPORT_* constants — presumably the
		 * numeric values coincide; verify against the HWRM header.
		 */
		if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB;
			link_info->force_speed2_nrz = true;
		}

		break;
	case IFM_20G_KR2:
		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
		link_info->req_link_speed =
		    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
		break;
	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
	case IFM_25G_LR:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB;
			link_info->force_speed2_nrz = true;
		}

		break;

	case IFM_40G_CR4:
	case IFM_40G_KR4:
	case IFM_40G_LR4:
	case IFM_40G_SR4:
	case IFM_40G_XLAUI:
	case IFM_40G_XLAUI_AC:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB;
			link_info->force_speed2_nrz = true;
		}

		break;

	case IFM_50G_CR2:
	case IFM_50G_KR2:
	case IFM_50G_KR4:
	case IFM_50G_SR2:
	case IFM_50G_LR2:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB;
			link_info->force_speed2_nrz = true;
		}

		break;

	/* 50G single-lane PAM4 variants. */
	case IFM_50G_CP:
	case IFM_50G_LR:
	case IFM_50G_SR:
	case IFM_50G_KR_PAM4:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
			link_info->force_pam4_speed = true;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56;
			link_info->force_pam4_56_speed2 = true;
		}

		break;

	case IFM_100G_CR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_SR4:
	case IFM_100G_AUI4:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB;
			link_info->force_speed2_nrz = true;
		}

		break;

	/* 100G over two PAM4 lanes. */
	case IFM_100G_CP2:
	case IFM_100G_SR2:
	case IFM_100G_KR2_PAM4:
	case IFM_100G_AUI2:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
			link_info->force_pam4_speed = true;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56;
			link_info->force_pam4_56_speed2 = true;
		}

		break;

	/* 100G over a single PAM4-112 lane. */
	case IFM_100G_KR_PAM4:
	case IFM_100G_CR_PAM4:
	case IFM_100G_DR:
	case IFM_100G_AUI2_AC:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112;
			link_info->force_pam4_112_speed2 = true;
		}

		break;

	case IFM_200G_SR4:
	case IFM_200G_FR4:
	case IFM_200G_LR4:
	case IFM_200G_DR4:
	case IFM_200G_CR4_PAM4:
	case IFM_200G_KR4_PAM4:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
			link_info->force_pam4_speed = true;

		} else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56;
			link_info->force_pam4_56_speed2 = true;
		}

		break;

	case IFM_200G_AUI4:

		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112;
			link_info->force_pam4_112_speed2 = true;
		}

		break;

	case IFM_400G_FR8:
	case IFM_400G_LR8:
	case IFM_400G_AUI8:
		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56;
			link_info->force_pam4_56_speed2 = true;
		}

		break;

	case IFM_400G_AUI8_AC:
	case IFM_400G_DR4:
		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;

		if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112) {
			link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112;
			link_info->force_pam4_112_speed2 = true;
		}

		break;

	/* BASE-T subtypes advertise a speed but keep autoneg enabled. */
	case IFM_1000_T:
		link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		break;
	case IFM_10G_T:
		link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		break;
	default:
		device_printf(softc->dev,
		    "Unsupported media type!  Using auto\n");
		/* Fall-through */
	case IFM_AUTO:
		// Auto
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		break;
	}

	rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
	bnxt_media_status(softc->ctx, &ifmr);
	return rc;
}
3586
3587
static int
3588
bnxt_promisc_set(if_ctx_t ctx, int flags)
3589
{
3590
struct bnxt_softc *softc = iflib_get_softc(ctx);
3591
if_t ifp = iflib_get_ifp(ctx);
3592
int rc;
3593
3594
if (if_getflags(ifp) & IFF_ALLMULTI ||
3595
if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
3596
softc->vnic_info.rx_mask |=
3597
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3598
else
3599
softc->vnic_info.rx_mask &=
3600
~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
3601
3602
if (if_getflags(ifp) & IFF_PROMISC)
3603
softc->vnic_info.rx_mask |=
3604
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
3605
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
3606
else
3607
softc->vnic_info.rx_mask &=
3608
~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
3609
3610
rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
3611
3612
return rc;
3613
}
3614
3615
static uint64_t
3616
bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
3617
{
3618
if_t ifp = iflib_get_ifp(ctx);
3619
3620
if (cnt < IFCOUNTERS)
3621
return if_get_counter_default(ifp, cnt);
3622
3623
return 0;
3624
}
3625
3626
static void
3627
bnxt_update_admin_status(if_ctx_t ctx)
3628
{
3629
struct bnxt_softc *softc = iflib_get_softc(ctx);
3630
3631
/*
3632
* When SR-IOV is enabled, avoid each VF sending this HWRM
3633
* request every sec with which firmware timeouts can happen
3634
*/
3635
if (!BNXT_PF(softc))
3636
return;
3637
3638
bnxt_hwrm_port_qstats(softc);
3639
3640
if (BNXT_CHIP_P5_PLUS(softc) &&
3641
(softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
3642
bnxt_hwrm_port_qstats_ext(softc);
3643
3644
if (BNXT_CHIP_P5_PLUS(softc)) {
3645
struct ifmediareq ifmr;
3646
3647
if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
3648
bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
3649
bnxt_media_status(softc->ctx, &ifmr);
3650
}
3651
}
3652
3653
return;
3654
}
3655
3656
static void
3657
bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
3658
{
3659
3660
struct bnxt_softc *softc = iflib_get_softc(ctx);
3661
uint64_t ticks_now = ticks;
3662
3663
/* Schedule bnxt_update_admin_status() once per sec */
3664
if (ticks_now - softc->admin_ticks >= hz) {
3665
softc->admin_ticks = ticks_now;
3666
iflib_admin_intr_deferred(ctx);
3667
}
3668
3669
return;
3670
}
3671
3672
static void inline
3673
bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
3674
{
3675
struct bnxt_softc *softc = cpr->ring.softc;
3676
3677
3678
if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3679
return;
3680
3681
if (BNXT_CHIP_P5_PLUS(softc))
3682
softc->db_ops.bnxt_db_nq(cpr, 1);
3683
else
3684
softc->db_ops.bnxt_db_rx_cq(cpr, 1);
3685
}
3686
3687
static void inline
3688
bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
3689
{
3690
struct bnxt_softc *softc = cpr->ring.softc;
3691
3692
if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
3693
return;
3694
3695
if (BNXT_CHIP_P5_PLUS(softc))
3696
softc->db_ops.bnxt_db_nq(cpr, 0);
3697
else
3698
softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3699
}
3700
3701
/* Enable all interrupts */
3702
static void
3703
bnxt_intr_enable(if_ctx_t ctx)
3704
{
3705
struct bnxt_softc *softc = iflib_get_softc(ctx);
3706
int i;
3707
3708
bnxt_do_enable_intr(&softc->def_cp_ring);
3709
for (i = 0; i < softc->nrxqsets; i++)
3710
if (BNXT_CHIP_P5_PLUS(softc))
3711
softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
3712
else
3713
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
3714
3715
return;
3716
}
3717
3718
/* Enable interrupt for a single queue */
3719
static int
3720
bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3721
{
3722
struct bnxt_softc *softc = iflib_get_softc(ctx);
3723
3724
if (BNXT_CHIP_P5_PLUS(softc))
3725
softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3726
else
3727
softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
3728
3729
return 0;
3730
}
3731
3732
static void
3733
bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
3734
{
3735
device_printf(softc->dev, "cmd sequence number %d\n",
3736
cmd_cmpl->sequence_id);
3737
return;
3738
}
3739
3740
static void
3741
bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
3742
{
3743
struct bnxt_softc *softc = cpr->ring.softc;
3744
uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
3745
3746
switch (type) {
3747
case HWRM_CMPL_TYPE_HWRM_DONE:
3748
bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
3749
break;
3750
case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
3751
bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
3752
break;
3753
default:
3754
device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
3755
__FUNCTION__, __LINE__, type);
3756
break;
3757
}
3758
}
3759
3760
/*
 * Drain a P5+ notification queue (NQ).  CQ-notification entries record
 * the epoch toggle bit for the queue's TX and RX completion rings; all
 * other entry types are handed to the async-message dispatcher.  The
 * consumer index, raw consumer, and valid bit are written back only if
 * at least one entry was consumed.
 */
void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
	struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
	nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
	struct bnxt_cp_ring *tx_cpr = &softc->tx_cp_rings[nqid];
	struct bnxt_cp_ring *rx_cpr = &softc->rx_cp_rings[nqid];
	bool v_bit = cpr->v_bit;
	uint32_t cons = cpr->cons;
	uint32_t raw_cons = cpr->raw_cons;
	uint16_t nq_type, nqe_cnt = 0;

	while (1) {
		/* Stop at the first entry firmware has not written yet. */
		if (!NQ_VALID(&cmp[cons], v_bit)) {
			goto done;
		}

		nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;

		if (NQE_CN_TYPE(nq_type) != NQ_CN_TYPE_CQ_NOTIFICATION) {
			bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
		} else {
			/* Save the toggle for the paired CP rings. */
			tx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
			rx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
		}

		/* Advance, flipping v_bit when the ring wraps. */
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		raw_cons++;
		nqe_cnt++;
	}
done:
	if (nqe_cnt) {
		cpr->cons = cons;
		cpr->raw_cons = raw_cons;
		cpr->v_bit = v_bit;
	}
}
3797
3798
static int
3799
bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
3800
{
3801
struct bnxt_softc *softc = iflib_get_softc(ctx);
3802
3803
if (BNXT_CHIP_P5_PLUS(softc)) {
3804
process_nq(softc, qid);
3805
softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
3806
}
3807
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
3808
return 0;
3809
}
3810
3811
/* Disable all interrupts */
3812
static void
3813
bnxt_disable_intr(if_ctx_t ctx)
3814
{
3815
struct bnxt_softc *softc = iflib_get_softc(ctx);
3816
int i;
3817
3818
/*
3819
* NOTE: These TX interrupts should never get enabled, so don't
3820
* update the index
3821
*/
3822
for (i = 0; i < softc->nrxqsets; i++)
3823
if (BNXT_CHIP_P5_PLUS(softc))
3824
softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
3825
else
3826
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
3827
3828
3829
return;
3830
}
3831
3832
/*
 * Wire up MSI-X vectors: the default/admin completion ring (pre-P5
 * only), one RX interrupt per RX queue set (bound to the NQ on P5+, the
 * CP ring otherwise), and one software interrupt per TX queue set.  On
 * failure, all RX IRQs allocated so far plus the default IRQ are freed.
 */
static int
bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *ring;
	struct if_irq *irq;
	uint16_t id;
	int rc;
	int i;
	char irq_name[16];

	if (BNXT_CHIP_P5_PLUS(softc))
		goto skip_default_cp;

	rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
	    softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
	    bnxt_handle_def_cp, softc, 0, "def_cp");
	if (rc) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register default completion ring handler\n");
		return rc;
	}

skip_default_cp:
	/*
	 * NOTE(review): on the P5+ path rc is first assigned inside this
	 * loop; if isc_nrxqsets were 0 the final "return rc" would read
	 * an uninitialized value — confirm nrxqsets >= 1 is guaranteed.
	 */
	for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
		if (BNXT_CHIP_P5_PLUS(softc)) {
			irq = &softc->nq_rings[i].irq;
			id = softc->nq_rings[i].ring.id;
			ring = &softc->nq_rings[i];
		} else {
			irq = &softc->rx_cp_rings[i].irq;
			id = softc->rx_cp_rings[i].ring.id ;
			ring = &softc->rx_cp_rings[i];
		}
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
		    bnxt_handle_isr, ring, i, irq_name);
		if (rc) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to register RX completion ring handler\n");
			/* Step back so the unwind frees only IRQs we got. */
			i--;
			goto fail;
		}
	}

	/* TX queues use deferred software interrupts, not vectors. */
	for (i=0; i<softc->scctx->isc_ntxqsets; i++)
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");

	return rc;

fail:
	for (; i>=0; i--)
		iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
	iflib_irq_free(ctx, &softc->def_cp_ring.irq);
	return rc;
}
3888
3889
/*
3890
* We're explicitly allowing duplicates here. They will need to be
3891
* removed as many times as they are added.
3892
*/
3893
static void
3894
bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
3895
{
3896
struct bnxt_softc *softc = iflib_get_softc(ctx);
3897
struct bnxt_vlan_tag *new_tag;
3898
3899
new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
3900
if (new_tag == NULL)
3901
return;
3902
new_tag->tag = vtag;
3903
new_tag->filter_id = -1;
3904
SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
3905
};
3906
3907
static void
3908
bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
3909
{
3910
struct bnxt_softc *softc = iflib_get_softc(ctx);
3911
struct bnxt_vlan_tag *vlan_tag;
3912
3913
SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
3914
if (vlan_tag->tag == vtag) {
3915
SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
3916
bnxt_vlan_tag, next);
3917
free(vlan_tag, M_DEVBUF);
3918
break;
3919
}
3920
}
3921
}
3922
3923
static int
3924
bnxt_wol_config(if_ctx_t ctx)
3925
{
3926
struct bnxt_softc *softc = iflib_get_softc(ctx);
3927
if_t ifp = iflib_get_ifp(ctx);
3928
3929
if (!softc)
3930
return -EBUSY;
3931
3932
if (!bnxt_wol_supported(softc))
3933
return -ENOTSUP;
3934
3935
if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
3936
if (!softc->wol) {
3937
if (bnxt_hwrm_alloc_wol_fltr(softc))
3938
return -EBUSY;
3939
softc->wol = 1;
3940
}
3941
} else {
3942
if (softc->wol) {
3943
if (bnxt_hwrm_free_wol_fltr(softc))
3944
return -EBUSY;
3945
softc->wol = 0;
3946
}
3947
}
3948
3949
return 0;
3950
}
3951
3952
static bool
3953
bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
3954
{
3955
switch (event) {
3956
case IFLIB_RESTART_VLAN_CONFIG:
3957
default:
3958
return (false);
3959
}
3960
}
3961
3962
static int
3963
bnxt_shutdown(if_ctx_t ctx)
3964
{
3965
bnxt_wol_config(ctx);
3966
return 0;
3967
}
3968
3969
static int
3970
bnxt_suspend(if_ctx_t ctx)
3971
{
3972
bnxt_wol_config(ctx);
3973
return 0;
3974
}
3975
3976
static int
3977
bnxt_resume(if_ctx_t ctx)
3978
{
3979
struct bnxt_softc *softc = iflib_get_softc(ctx);
3980
3981
bnxt_get_wol_settings(softc);
3982
return 0;
3983
}
3984
3985
/*
 * Driver-private ioctl handler (SIOCGPRIVATE_0), used by the bnxt
 * management utilities for NVM and firmware operations.
 *
 * The userland request is a struct bnxt_ioctl_data (a header plus a
 * per-command payload) referenced through the ifreq buffer.  It is
 * copied onto the stack, dispatched on hdr.type, and copied back out.
 * On a sub-command failure only hdr.rc is copied back so userland can
 * see the HWRM error code.
 *
 * Returns 0 on success or an errno value from priv_check/copyin/
 * copyout/DMA allocation; ENOTSUP for unrecognized commands.
 */
static int
bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct ifreq *ifr = (struct ifreq *)data;
	struct bnxt_ioctl_header *ioh;
	size_t iol;
	int rc = ENOTSUP;
	struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;

	switch (command) {
	case SIOCGPRIVATE_0:
		/* All private ioctls require driver privilege. */
		if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
			goto exit;

		ioh = ifr_buffer_get_buffer(ifr);
		iol = ifr_buffer_get_length(ifr);
		/* Never copy in more than the on-stack storage can hold. */
		if (iol > sizeof(iod_storage))
			return (EINVAL);

		if ((rc = copyin(ioh, iod, iol)) != 0)
			goto exit;

		switch (iod->hdr.type) {
		case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
			    &iod->find;

			rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
			    &find->ordinal, find->ext, &find->index,
			    find->use_index, find->search_opt,
			    &find->data_length, &find->item_length,
			    &find->fw_ver);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_READ:
		{
			struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
			struct iflib_dma_info dma_data;
			size_t offset;
			size_t remain;
			size_t csize;

			/*
			 * Some HWRM versions can't read more than 0x8000 bytes
			 */
			rc = iflib_dma_alloc(softc->ctx,
			    min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			/*
			 * Read in 0x8000-byte chunks, bouncing each chunk
			 * from the DMA buffer to the user buffer.
			 */
			for (remain = rd->length, offset = 0;
			    remain && offset < rd->length; offset += 0x8000) {
				csize = min(remain, 0x8000);
				rc = bnxt_hwrm_nvm_read(softc, rd->index,
				    rd->offset + offset, csize, &dma_data);
				if (rc) {
					iod->hdr.rc = rc;
					rc = copyout(&iod->hdr.rc, &ioh->rc,
					    sizeof(ioh->rc));
					break;
				} else {
					rc = copyout(dma_data.idi_vaddr,
					    rd->data + offset, csize);
					iod->hdr.rc = rc;
				}
				remain -= csize;
			}
			if (rc == 0)
				rc = copyout(iod, ioh, iol);

			iflib_dma_free(&dma_data);
			goto exit;
		}
		case BNXT_HWRM_FW_RESET:
		{
			struct bnxt_ioctl_hwrm_fw_reset *rst =
			    &iod->reset;

			rc = bnxt_hwrm_fw_reset(softc, rst->processor,
			    &rst->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_QSTATUS:
		{
			struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
			    &iod->status;

			rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
			    &qstat->selfreset);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_WRITE:
		{
			struct bnxt_ioctl_hwrm_nvm_write *wr =
			    &iod->write;

			rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
			    wr->type, wr->ordinal, wr->ext, wr->attr,
			    wr->option, wr->data_length, wr->keep,
			    &wr->item_length, &wr->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
		{
			struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
			    &iod->erase;

			rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_INFO:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
			    &iod->dir_info;

			rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
			    &info->entry_length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
		{
			struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
			    &iod->dir_entries;
			struct iflib_dma_info dma_data;

			/* Directory is read into a DMA bounce buffer. */
			rc = iflib_dma_alloc(softc->ctx, get->max_size,
			    &dma_data, BUS_DMA_NOWAIT);
			if (rc)
				break;
			rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
			    &get->entry_length, &dma_data);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				rc = copyout(dma_data.idi_vaddr, get->data,
				    get->entry_length * get->entries);
				iod->hdr.rc = rc;
				if (rc == 0)
					rc = copyout(iod, ioh, iol);
			}
			iflib_dma_free(&dma_data);

			goto exit;
		}
		case BNXT_HWRM_NVM_VERIFY_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
			    &iod->verify;

			rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
			    vrfy->ordinal, vrfy->ext);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_INSTALL_UPDATE:
		{
			struct bnxt_ioctl_hwrm_nvm_install_update *inst =
			    &iod->install;

			rc = bnxt_hwrm_nvm_install_update(softc,
			    inst->install_type, &inst->installed_items,
			    &inst->result, &inst->problem_item,
			    &inst->reset_required);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_NVM_MODIFY:
		{
			struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;

			rc = bnxt_hwrm_nvm_modify(softc, mod->index,
			    mod->offset, mod->data, true, mod->length);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_GET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_get_time *gtm =
			    &iod->get_time;

			rc = bnxt_hwrm_fw_get_time(softc, &gtm->year,
			    &gtm->month, &gtm->day, &gtm->hour, &gtm->minute,
			    &gtm->second, &gtm->millisecond, &gtm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		case BNXT_HWRM_FW_SET_TIME:
		{
			struct bnxt_ioctl_hwrm_fw_set_time *stm =
			    &iod->set_time;

			rc = bnxt_hwrm_fw_set_time(softc, stm->year,
			    stm->month, stm->day, stm->hour, stm->minute,
			    stm->second, stm->millisecond, stm->zone);
			if (rc) {
				iod->hdr.rc = rc;
				rc = copyout(&iod->hdr.rc, &ioh->rc,
				    sizeof(ioh->rc));
			} else {
				iod->hdr.rc = 0;
				rc = copyout(iod, ioh, iol);
			}

			goto exit;
		}
		}
		break;
	}

exit:
	return rc;
}
4287
4288
static int
4289
bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
4290
{
4291
struct bnxt_softc *softc = iflib_get_softc(ctx);
4292
uint8_t *data = i2c->data;
4293
int rc;
4294
4295
/* No point in going further if phy status indicates
4296
* module is not inserted or if it is powered down or
4297
* if it is of type 10GBase-T
4298
*/
4299
if (softc->link_info.module_status >
4300
HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG)
4301
return -EOPNOTSUPP;
4302
4303
/* This feature is not supported in older firmware versions */
4304
if (!BNXT_CHIP_P5_PLUS(softc) ||
4305
(softc->hwrm_spec_code < 0x10202))
4306
return -EOPNOTSUPP;
4307
4308
4309
rc = bnxt_read_sfp_module_eeprom_info(softc, i2c->dev_addr, 0, 0, 0,
4310
i2c->offset, i2c->len, data);
4311
4312
return rc;
4313
}
4314
4315
/*
4316
* Support functions
4317
*/
4318
/*
 * Query PHY capabilities and the current link state, then seed the
 * "requested" link settings (the driver's ethtool-style copy) from what
 * firmware/NVM currently reports.
 *
 * Returns 0 on success or the error from the HWRM queries.
 */
static int
bnxt_probe_phy(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	int rc = 0;

	softc->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(softc);
	if (rc) {
		device_printf(softc->dev,
		    "Probe phy can't get phy capabilities (rc: %x)\n", rc);
		return rc;
	}

	/* Refresh link_info without changing the reported link state. */
	rc = bnxt_update_link(softc, false);
	if (rc) {
		device_printf(softc->dev,
		    "Probe phy can't update link (rc: %x)\n", rc);
		return (rc);
	}

	bnxt_get_port_module_status(softc);

	/* Initialize the ethtool-style settings copy with NVM settings. */
	if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		link_info->autoneg |= BNXT_AUTONEG_SPEED;

	link_info->req_duplex = link_info->duplex_setting;

	/* NRZ link speed */
	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
		link_info->req_link_speed = link_info->auto_link_speeds;
	else
		link_info->req_link_speed = link_info->force_link_speed;

	/*
	 * PAM4 link speed overrides NRZ when present; a forced PAM4
	 * speed takes precedence over the autoneg PAM4 mask.
	 */
	if (link_info->auto_pam4_link_speeds)
		link_info->req_link_speed = link_info->auto_pam4_link_speeds;
	if (link_info->force_pam4_link_speed)
		link_info->req_link_speed = link_info->force_pam4_link_speed;

	return (rc);
}
4361
4362
/*
 * Populate the ifmedia list for one broad media class (CR/LR/SR/...).
 *
 * Each BNXT_IFMEDIA_ADD adds an ifmedia entry when the corresponding
 * speed bit is set in the given mask.  Exactly one of the three masks
 * is normally non-zero, chosen by the caller from the active signalling
 * mode: supported_NRZ_speeds and supported_pam4_speeds come from the
 * pre-Thor2 speed masks, supported_speeds2 from the Thor2 "speeds2"
 * mask (which encodes NRZ, PAM4-56 and PAM4-112 rates in one field).
 */
static void
add_media(struct bnxt_softc *softc, u8 media_type, u16 supported_NRZ_speeds,
    u16 supported_pam4_speeds, u16 supported_speeds2)
{

	switch (media_type) {
	case BNXT_MEDIA_CR:

		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_CP);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_CP2);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);

		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
		/* Thor2 NRZ */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_CR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_CR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_CR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_CR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_CR1);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
		/* Thor2 PAM4-56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_CP);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_CP2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_CR4_PAM4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
		/* Thor2 PAM4-112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_CR_PAM4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);

		break;

	case BNXT_MEDIA_LR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_LR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_LR);
		/* Thor2 NRZ */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_LR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_LR);
		/* Thor2 PAM4-56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_LR8);
		/* Thor2 PAM4-112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);

		break;

	case BNXT_MEDIA_SR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_SR);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_SR2);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_SR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SX);
		/* Thor2 NRZ */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_SR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SX);
		/* Thor2 PAM4-56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_SR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_SR2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_SR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
		/* Thor2 PAM4-112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_DR4);
		break;

	case BNXT_MEDIA_ER:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_ER4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_AUI4);
		/* Thor2 PAM4-56 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_FR8);
		/* Thor2 PAM4-112 */
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
		break;

	case BNXT_MEDIA_KR:
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
		BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_KR4);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_20GB, IFM_20G_KR2);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
		break;

	case BNXT_MEDIA_AC:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI_AC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_ACC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_AOC);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI_AC);
		break;

	case BNXT_MEDIA_BASECX:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
		break;

	case BNXT_MEDIA_BASET:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100MB, IFM_100_T);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10MB, IFM_10_T);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_T);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_T);
		break;

	case BNXT_MEDIA_BASEKX:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_KX);
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_KR);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_KX);
		break;

	case BNXT_MEDIA_BASESGMII:
		BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SGMII);
		BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SGMII);
		break;

	default:
		/* Unknown media class: add nothing. */
		break;

	}
	return;

}
4527
4528
static void
4529
bnxt_add_media_types(struct bnxt_softc *softc)
4530
{
4531
struct bnxt_link_info *link_info = &softc->link_info;
4532
uint16_t supported_NRZ_speeds = 0, supported_pam4_speeds = 0, supported_speeds2 = 0;
4533
uint8_t phy_type = get_phy_type(softc), media_type;
4534
4535
supported_NRZ_speeds = link_info->support_speeds;
4536
supported_speeds2 = link_info->support_speeds2;
4537
supported_pam4_speeds = link_info->support_pam4_speeds;
4538
4539
/* Auto is always supported */
4540
ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
4541
4542
if (softc->flags & BNXT_FLAG_NPAR)
4543
return;
4544
4545
switch (phy_type) {
4546
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
4547
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
4548
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
4549
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
4550
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
4551
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
4552
4553
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
4554
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
4555
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
4556
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR8:
4557
4558
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR:
4559
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR2:
4560
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR4:
4561
4562
media_type = BNXT_MEDIA_CR;
4563
break;
4564
4565
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
4566
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
4567
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
4568
4569
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
4570
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR2:
4571
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
4572
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR8:
4573
4574
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR:
4575
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR2:
4576
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR4:
4577
4578
media_type = BNXT_MEDIA_LR;
4579
break;
4580
4581
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
4582
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
4583
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
4584
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
4585
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
4586
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
4587
4588
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
4589
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR2:
4590
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
4591
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR8:
4592
4593
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR:
4594
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR2:
4595
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR4:
4596
4597
media_type = BNXT_MEDIA_SR;
4598
break;
4599
4600
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
4601
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
4602
4603
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASEER:
4604
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2:
4605
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
4606
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER8:
4607
4608
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER:
4609
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER2:
4610
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4:
4611
4612
media_type = BNXT_MEDIA_ER;
4613
break;
4614
4615
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
4616
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
4617
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
4618
media_type = BNXT_MEDIA_KR;
4619
break;
4620
4621
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
4622
media_type = BNXT_MEDIA_AC;
4623
break;
4624
4625
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
4626
media_type = BNXT_MEDIA_BASECX;
4627
break;
4628
4629
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
4630
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
4631
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
4632
media_type = BNXT_MEDIA_BASET;
4633
break;
4634
4635
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
4636
media_type = BNXT_MEDIA_BASEKX;
4637
break;
4638
4639
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
4640
media_type = BNXT_MEDIA_BASESGMII;
4641
break;
4642
4643
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
4644
/* Only Autoneg is supported for TYPE_UNKNOWN */
4645
break;
4646
4647
default:
4648
/* Only Autoneg is supported for new phy type values */
4649
device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
4650
break;
4651
}
4652
4653
switch (link_info->sig_mode) {
4654
case BNXT_SIG_MODE_NRZ:
4655
if (supported_NRZ_speeds != 0)
4656
add_media(softc, media_type, supported_NRZ_speeds, 0, 0);
4657
else
4658
add_media(softc, media_type, 0, 0, supported_speeds2);
4659
break;
4660
case BNXT_SIG_MODE_PAM4:
4661
if (supported_pam4_speeds != 0)
4662
add_media(softc, media_type, 0, supported_pam4_speeds, 0);
4663
else
4664
add_media(softc, media_type, 0, 0, supported_speeds2);
4665
break;
4666
case BNXT_SIG_MODE_PAM4_112:
4667
add_media(softc, media_type, 0, 0, supported_speeds2);
4668
break;
4669
}
4670
4671
return;
4672
}
4673
4674
static int
4675
bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
4676
{
4677
uint32_t flag;
4678
4679
if (bar->res != NULL) {
4680
device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
4681
return EDOOFUS;
4682
}
4683
4684
bar->rid = PCIR_BAR(bar_num);
4685
flag = RF_ACTIVE;
4686
if (shareable)
4687
flag |= RF_SHAREABLE;
4688
4689
if ((bar->res =
4690
bus_alloc_resource_any(softc->dev,
4691
SYS_RES_MEMORY,
4692
&bar->rid,
4693
flag)) == NULL) {
4694
device_printf(softc->dev,
4695
"PCI BAR%d mapping failure\n", bar_num);
4696
return (ENXIO);
4697
}
4698
bar->tag = rman_get_bustag(bar->res);
4699
bar->handle = rman_get_bushandle(bar->res);
4700
bar->size = rman_get_size(bar->res);
4701
4702
return 0;
4703
}
4704
4705
static int
4706
bnxt_pci_mapping(struct bnxt_softc *softc)
4707
{
4708
int rc;
4709
4710
rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
4711
if (rc)
4712
return rc;
4713
4714
rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
4715
4716
return rc;
4717
}
4718
4719
static void
4720
bnxt_pci_mapping_free(struct bnxt_softc *softc)
4721
{
4722
if (softc->hwrm_bar.res != NULL)
4723
bus_release_resource(softc->dev, SYS_RES_MEMORY,
4724
softc->hwrm_bar.rid, softc->hwrm_bar.res);
4725
softc->hwrm_bar.res = NULL;
4726
4727
if (softc->doorbell_bar.res != NULL)
4728
bus_release_resource(softc->dev, SYS_RES_MEMORY,
4729
softc->doorbell_bar.rid, softc->doorbell_bar.res);
4730
softc->doorbell_bar.res = NULL;
4731
}
4732
4733
static int
4734
bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
4735
{
4736
struct bnxt_link_info *link_info = &softc->link_info;
4737
uint8_t link_up = link_info->link_up;
4738
int rc = 0;
4739
4740
rc = bnxt_hwrm_port_phy_qcfg(softc);
4741
if (rc)
4742
goto exit;
4743
4744
/* TODO: need to add more logic to report VF link */
4745
if (chng_link_state) {
4746
if (link_info->phy_link_status ==
4747
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
4748
link_info->link_up = 1;
4749
else
4750
link_info->link_up = 0;
4751
if (link_up != link_info->link_up)
4752
bnxt_report_link(softc);
4753
} else {
4754
/* always link down if not require to update link state */
4755
link_info->link_up = 0;
4756
}
4757
4758
exit:
4759
return rc;
4760
}
4761
4762
/* Link speeds in Mbps, mirroring the Linux ethtool SPEED_* values. */
#define ETHTOOL_SPEED_1000 1000
#define ETHTOOL_SPEED_10000 10000
#define ETHTOOL_SPEED_20000 20000
#define ETHTOOL_SPEED_25000 25000
#define ETHTOOL_SPEED_40000 40000
#define ETHTOOL_SPEED_50000 50000
#define ETHTOOL_SPEED_100000 100000
#define ETHTOOL_SPEED_200000 200000
#define ETHTOOL_SPEED_UNKNOWN -1

/*
 * Translate a HWRM link-speed code into an ethtool-style speed in
 * Mbps.  Unrecognized codes map to ETHTOOL_SPEED_UNKNOWN (-1, which
 * becomes all-ones in the u32 return type).
 */
static u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		return ETHTOOL_SPEED_1000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		return ETHTOOL_SPEED_10000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		return ETHTOOL_SPEED_20000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		return ETHTOOL_SPEED_25000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		return ETHTOOL_SPEED_40000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		return ETHTOOL_SPEED_50000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		return ETHTOOL_SPEED_100000;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
		return ETHTOOL_SPEED_200000;
	default:
		return ETHTOOL_SPEED_UNKNOWN;
	}
}
4796
4797
/*
 * Log and propagate a link state/attribute change to iflib.
 *
 * Nothing is reported when the link stays down, or stays up with
 * unchanged duplex, PHY type and flow-control settings.  Every report
 * refreshes the cached "last_*" values (used for the change detection
 * above) and rebuilds the ifmedia list.
 */
void
bnxt_report_link(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	const char *duplex = NULL, *flow_ctrl = NULL;
	const char *signal_mode = "";

	/* Mirror speed/lane count into the aux (RoCE) device if present. */
	if(softc->edev) {
		softc->edev->espeed =
		    bnxt_fw_to_ethtool_speed(link_info->link_speed);
		softc->edev->lanes = link_info->active_lanes;
	}

	/* Suppress reports when nothing visible changed. */
	if (link_info->link_up == link_info->last_link_up) {
		if (!link_info->link_up)
			return;
		if ((link_info->duplex == link_info->last_duplex) &&
		    (link_info->phy_type == link_info->last_phy_type) &&
		    (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
			return;
	}

	if (link_info->link_up) {
		if (link_info->duplex ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
			duplex = "full duplex";
		else
			duplex = "half duplex";
		if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive & transmit";
		else if (link_info->flow_ctrl.tx)
			flow_ctrl = "FC - transmit";
		else if (link_info->flow_ctrl.rx)
			flow_ctrl = "FC - receive";
		else
			flow_ctrl = "FC - none";

		/* Decode the active signalling mode when firmware knows it. */
		if (softc->link_info.phy_qcfg_resp.option_flags &
		    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
			uint8_t sig_mode = softc->link_info.active_fec_sig_mode &
			    HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_MASK;
			switch (sig_mode) {
			case BNXT_SIG_MODE_NRZ:
				signal_mode = "(NRZ) ";
				break;
			case BNXT_SIG_MODE_PAM4:
				signal_mode = "(PAM4 56Gbps) ";
				break;
			case BNXT_SIG_MODE_PAM4_112:
				signal_mode = "(PAM4 112Gbps) ";
				break;
			default:
				break;
			}
			link_info->sig_mode = sig_mode;
		}

		iflib_link_state_change(softc->ctx, LINK_STATE_UP,
		    IF_Gbps(100));
		/* link_speed is in units of 100 Mbps. */
		device_printf(softc->dev, "Link is UP %s %s, %s - %d Mbps \n", duplex, signal_mode,
		    flow_ctrl, (link_info->link_speed * 100));
	} else {
		iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
		    bnxt_get_baudrate(&softc->link_info));
		device_printf(softc->dev, "Link is Down\n");
	}

	/* Cache the just-reported state for the next change detection. */
	link_info->last_link_up = link_info->link_up;
	link_info->last_duplex = link_info->duplex;
	link_info->last_phy_type = link_info->phy_type;
	link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
	link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
	link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
	/* update media types */
	ifmedia_removeall(softc->media);
	bnxt_add_media_types(softc);
	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
}
4875
4876
static int
4877
bnxt_handle_isr(void *arg)
4878
{
4879
struct bnxt_cp_ring *cpr = arg;
4880
struct bnxt_softc *softc = cpr->ring.softc;
4881
4882
cpr->int_count++;
4883
/* Disable further interrupts for this queue */
4884
if (!BNXT_CHIP_P5_PLUS(softc))
4885
softc->db_ops.bnxt_db_rx_cq(cpr, 0);
4886
4887
return FILTER_SCHEDULE_THREAD;
4888
}
4889
4890
static int
4891
bnxt_handle_def_cp(void *arg)
4892
{
4893
struct bnxt_softc *softc = arg;
4894
4895
softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
4896
iflib_config_task_enqueue(softc->ctx, &softc->def_cp_task);
4897
return FILTER_HANDLED;
4898
}
4899
4900
static void
4901
bnxt_clear_ids(struct bnxt_softc *softc)
4902
{
4903
int i;
4904
4905
softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4906
softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4907
softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
4908
softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4909
for (i = 0; i < softc->ntxqsets; i++) {
4910
softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4911
softc->tx_cp_rings[i].ring.phys_id =
4912
(uint16_t)HWRM_NA_SIGNATURE;
4913
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4914
4915
if (!softc->nq_rings)
4916
continue;
4917
softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4918
softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4919
}
4920
for (i = 0; i < softc->nrxqsets; i++) {
4921
softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
4922
softc->rx_cp_rings[i].ring.phys_id =
4923
(uint16_t)HWRM_NA_SIGNATURE;
4924
softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4925
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
4926
softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
4927
}
4928
softc->vnic_info.filter_id = -1;
4929
softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
4930
softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
4931
memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
4932
softc->vnic_info.rss_grp_tbl.idi_size);
4933
}
4934
4935
static void
4936
bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
4937
{
4938
struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
4939
int i;
4940
4941
for (i = 0; i < cpr->ring.ring_size; i++)
4942
cmp[i].info3_v = !cpr->v_bit;
4943
}
4944
4945
/*
 * Decode a firmware ERROR_REPORT async event and log a human-readable
 * description of it.  data1 carries the error type (and type-specific
 * detail bits); data2 carries additional type-specific payload.
 */
static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
{
	u32 type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);

	switch (type) {
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
		device_printf(softc->dev,
		    "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
		    BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
		device_printf(softc->dev, "Pause Storm detected!\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
		device_printf(softc->dev,
		    "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
		    BNXT_EVENT_DBR_EPOCH(data1));
		break;
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
		const char *what;

		/* data1 flags tell us which NVM operation failed. */
		if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
			what = "nvm write error";
		else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
			what = "nvm erase error";
		else
			what = "unrecognized nvm error";

		device_printf(softc->dev, "%s reported at address 0x%x\n",
		    what, (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
		u32 level = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
		const char *level_str;
		const char *dir_str;

		if (level == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN)
			level_str = "warning";
		else if (level == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL)
			level_str = "critical";
		else if (level == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL)
			level_str = "fatal";
		else if (level == HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN)
			level_str = "shutdown";
		else {
			device_printf(softc->dev,
			    "Unknown Thermal threshold type event\n");
			return;
		}

		dir_str = EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) ?
		    "above" : "below";
		device_printf(softc->dev,
		    "Chip temperature has gone %s the %s thermal threshold!\n",
		    dir_str, level_str);
		device_printf(softc->dev,
		    "Temperature (In Celsius), Current: %u, threshold: %u\n",
		    BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
		    BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
		device_printf(softc->dev,
		    "Speed change is not supported with dual rate transceivers on this board\n");
		break;
	default:
		device_printf(softc->dev,
		    "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
		    type, data1, data2);
		break;
	}
}
5026
5027
/*
 * Process one asynchronous (firmware-generated) completion from the
 * default completion ring.  Decodes the event ID and either updates
 * driver state, records firmware-reset/health bookkeeping, or just logs
 * the event.  Most paths fall through to bnxt_queue_sp_work(); the
 * error-report and error-recovery paths jump past it via
 * async_event_process_exit.  In all cases the raw event is forwarded to
 * the ULP (RoCE) layer at the end.
 */
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (void *)cmpl;
	uint16_t async_id = le16toh(ae->event_id);
	struct ifmediareq ifmr;
	char *type_str;
	char *status_desc;
	struct bnxt_fw_health *fw_health;
	u32 data1 = le32toh(ae->event_data1);
	u32 data2 = le32toh(ae->event_data2);

	switch (async_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/*
		 * Link changed: on P5+ just note it in the state bit vector
		 * (picked up later); older chips refresh media status now.
		 */
		if (BNXT_CHIP_P5_PLUS(softc))
			bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
		else
			bnxt_media_status(softc->ctx, &ifmr);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
		/* Purely informational; decoded and logged, no sp work. */
		bnxt_event_error_report(softc, data1, data2);
		goto async_event_process_exit;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
		/* Firmware announces an impending reset. */
		type_str = "Solicited";

		if (!softc->fw_health)
			goto async_event_process_exit;

		/* jiffies: linuxkpi-style tick counter — TODO confirm shim */
		softc->fw_reset_timestamp = jiffies;
		/* Wait-window bounds come in deciseconds; 0 means default. */
		softc->fw_reset_min_dsecs = ae->timestamp_lo;
		if (!softc->fw_reset_min_dsecs)
			softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
		softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
		if (!softc->fw_reset_max_dsecs)
			softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
		/* Classify the reset and set the matching state flag. */
		if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
			set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
		} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
			type_str = "Fatal";
			softc->fw_health->fatalities++;
			set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
		} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
			   EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
			type_str = "Non-fatal";
			softc->fw_health->survivals++;
			set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
		}
		/* dsecs * 100 converts deciseconds to milliseconds. */
		device_printf(softc->dev,
			      "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
			      type_str, data1, data2,
			      softc->fw_reset_min_dsecs * 100,
			      softc->fw_reset_max_dsecs * 100);
		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
		break;
	}
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
		/* Firmware error-recovery watchdog (re)configuration. */
		fw_health = softc->fw_health;
		status_desc = "healthy";
		u32 status;

		if (!fw_health)
			goto async_event_process_exit;

		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
			fw_health->enabled = false;
			device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
			break;
		}
		fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
		/* Polling period (dsecs) scaled into watchdog timer ticks. */
		fw_health->tmr_multiplier =
			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
				     HZ * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
		/* Seed baselines only on the disabled->enabled transition. */
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
		status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
		if (status != BNXT_FW_STATUS_HEALTHY)
			status_desc = "unhealthy";
		device_printf(softc->dev,
			      "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
			      fw_health->primary ? "primary" : "backup", status,
			      status_desc, fw_health->last_fw_reset_cnt);
		if (!fw_health->enabled) {
			/* Make sure tmr_counter is set and seen by
			 * bnxt_health_check() before setting enabled
			 */
			smp_mb();
			fw_health->enabled = true;
		}
		goto async_event_process_exit;
	}

	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* Recognized events the driver does not act on yet. */
		device_printf(softc->dev,
		    "Unhandled async completion type %u\n", async_id);
		break;
	default:
		dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
			async_id);
		break;
	}
	bnxt_queue_sp_work(softc);

async_event_process_exit:
	/* Always hand the event to the ULP layer as well. */
	bnxt_ulp_async_events(softc, ae);
}
5154
5155
/*
 * Deferred (taskqueue) handler for the default completion ring.  Walks
 * the ring consuming valid entries, dispatching async events to
 * bnxt_handle_async_event() and logging everything else, then records
 * the last-consumed position and rings the doorbell with interrupts
 * re-armed.  'pending' is the standard taskqueue argument and is unused.
 */
static void
bnxt_def_cp_task(void *context, int pending)
{
	if_ctx_t ctx = context;
	struct bnxt_softc *softc = iflib_get_softc(ctx);
	struct bnxt_cp_ring *cpr = &softc->def_cp_ring;

	/* Handle completions on the default completion ring */
	struct cmpl_base *cmpl;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;

	for (;;) {
		/*
		 * Remember the position *before* advancing: if the next
		 * entry turns out to be invalid, (last_cons, last_v_bit)
		 * is the final consumed slot to report back to hardware.
		 */
		last_cons = cons;
		last_v_bit = v_bit;
		/* Advance cons; v_bit flips each time the ring wraps. */
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

		/* Stop at the first entry hardware has not written yet. */
		if (!CMP_VALID(cmpl, v_bit))
			break;

		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(softc, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_RX_L2:
		case CMPL_BASE_TYPE_RX_L2_V3:
		case CMPL_BASE_TYPE_RX_AGG:
		case CMPL_BASE_TYPE_RX_TPA_START:
		case CMPL_BASE_TYPE_RX_TPA_START_V3:
		case CMPL_BASE_TYPE_RX_TPA_END:
		case CMPL_BASE_TYPE_STAT_EJECT:
		case CMPL_BASE_TYPE_HWRM_DONE:
		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
		case CMPL_BASE_TYPE_CQ_NOTIFICATION:
		case CMPL_BASE_TYPE_SRQ_EVENT:
		case CMPL_BASE_TYPE_DBQ_EVENT:
		case CMPL_BASE_TYPE_QP_EVENT:
		case CMPL_BASE_TYPE_FUNC_EVENT:
			/* Known types not expected on the default ring. */
			dev_dbg(softc->dev, "Unhandled Async event completion type %u\n",
				type);
			break;
		default:
			dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
				type);
			break;
		}
	}

	/* Publish consumed position; doorbell arg 1 re-enables the irq. */
	cpr->cons = last_cons;
	cpr->v_bit = last_v_bit;
	softc->db_ops.bnxt_db_rx_cq(cpr, 1);
}
5214
5215
uint8_t
5216
get_phy_type(struct bnxt_softc *softc)
5217
{
5218
struct bnxt_link_info *link_info = &softc->link_info;
5219
uint8_t phy_type = link_info->phy_type;
5220
uint16_t supported;
5221
5222
if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
5223
return phy_type;
5224
5225
/* Deduce the phy type from the media type and supported speeds */
5226
supported = link_info->support_speeds;
5227
5228
if (link_info->media_type ==
5229
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
5230
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
5231
if (link_info->media_type ==
5232
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
5233
if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
5234
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
5235
if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
5236
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
5237
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
5238
}
5239
if (link_info->media_type ==
5240
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
5241
return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
5242
5243
return phy_type;
5244
}
5245
5246
bool
5247
bnxt_check_hwrm_version(struct bnxt_softc *softc)
5248
{
5249
char buf[16];
5250
5251
sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
5252
softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
5253
if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
5254
device_printf(softc->dev,
5255
"WARNING: HWRM version %s is too old (older than %s)\n",
5256
softc->ver_info->hwrm_if_ver, buf);
5257
return false;
5258
}
5259
else if(softc->ver_info->hwrm_min_major ==
5260
softc->ver_info->hwrm_if_major) {
5261
if (softc->ver_info->hwrm_min_minor >
5262
softc->ver_info->hwrm_if_minor) {
5263
device_printf(softc->dev,
5264
"WARNING: HWRM version %s is too old (older than %s)\n",
5265
softc->ver_info->hwrm_if_ver, buf);
5266
return false;
5267
}
5268
else if (softc->ver_info->hwrm_min_minor ==
5269
softc->ver_info->hwrm_if_minor) {
5270
if (softc->ver_info->hwrm_min_update >
5271
softc->ver_info->hwrm_if_update) {
5272
device_printf(softc->dev,
5273
"WARNING: HWRM version %s is too old (older than %s)\n",
5274
softc->ver_info->hwrm_if_ver, buf);
5275
return false;
5276
}
5277
}
5278
}
5279
return true;
5280
}
5281
5282
static uint64_t
5283
bnxt_get_baudrate(struct bnxt_link_info *link)
5284
{
5285
switch (link->link_speed) {
5286
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
5287
return IF_Mbps(100);
5288
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
5289
return IF_Gbps(1);
5290
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
5291
return IF_Gbps(2);
5292
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
5293
return IF_Mbps(2500);
5294
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
5295
return IF_Gbps(10);
5296
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
5297
return IF_Gbps(20);
5298
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
5299
return IF_Gbps(25);
5300
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
5301
return IF_Gbps(40);
5302
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
5303
return IF_Gbps(50);
5304
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
5305
return IF_Gbps(100);
5306
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
5307
return IF_Mbps(10);
5308
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
5309
return IF_Gbps(200);
5310
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB:
5311
return IF_Gbps(400);
5312
}
5313
return IF_Gbps(100);
5314
}
5315
5316
static void
5317
bnxt_get_wol_settings(struct bnxt_softc *softc)
5318
{
5319
uint16_t wol_handle = 0;
5320
5321
if (!bnxt_wol_supported(softc))
5322
return;
5323
5324
do {
5325
wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
5326
} while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);
5327
}
5328
5329