Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/bxe/bxe_stats.c
39507 views
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5
*
6
* Redistribution and use in source and binary forms, with or without
7
* modification, are permitted provided that the following conditions
8
* are met:
9
*
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26
* THE POSSIBILITY OF SUCH DAMAGE.
27
*/
28
29
#include <sys/cdefs.h>
30
#include "bxe.h"
31
#include "bxe_stats.h"
32
33
#ifdef __i386__
34
#define BITS_PER_LONG 32
35
#else
36
#define BITS_PER_LONG 64
37
#endif
38
39
40
/*
 * Combine a {hi, lo} pair of 32-bit counter words into a single value.
 * 'hiref' points at the high word; the low word immediately follows it.
 * On 32-bit builds only the low word is returned (a long cannot hold both).
 */
static inline long
bxe_hilo(uint32_t *hiref)
{
    uint32_t lo_word = hiref[1];
#if (BITS_PER_LONG == 64)
    uint32_t hi_word = hiref[0];
    return (HILO_U64(hi_word, lo_word));
#else
    return (lo_word);
#endif
}
51
52
static inline uint16_t
53
bxe_get_port_stats_dma_len(struct bxe_softc *sc)
54
{
55
uint16_t res = 0;
56
uint32_t size;
57
58
/* 'newest' convention - shmem2 contains the size of the port stats */
59
if (SHMEM2_HAS(sc, sizeof_port_stats)) {
60
size = SHMEM2_RD(sc, sizeof_port_stats);
61
if (size) {
62
res = size;
63
}
64
65
/* prevent newer BC from causing buffer overflow */
66
if (res > sizeof(struct host_port_stats)) {
67
res = sizeof(struct host_port_stats);
68
}
69
}
70
71
/*
72
* Older convention - all BCs support the port stats fields up until
73
* the 'not_used' field
74
*/
75
if (!res) {
76
res = (offsetof(struct host_port_stats, not_used) + 4);
77
78
/* if PFC stats are supported by the MFW, DMA them as well */
79
if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
80
res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
81
offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
82
}
83
}
84
85
res >>= 2;
86
87
DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
88
return (res);
89
}
90
91
/*
92
* Init service functions
93
*/
94
95
static void
96
bxe_dp_stats(struct bxe_softc *sc)
97
{
98
int i;
99
100
BLOGD(sc, DBG_STATS,
101
"dumping stats:\n"
102
" fw_stats_req\n"
103
" hdr\n"
104
" cmd_num %d\n"
105
" reserved0 %d\n"
106
" drv_stats_counter %d\n"
107
" reserved1 %d\n"
108
" stats_counters_addrs %x %x\n",
109
sc->fw_stats_req->hdr.cmd_num,
110
sc->fw_stats_req->hdr.reserved0,
111
sc->fw_stats_req->hdr.drv_stats_counter,
112
sc->fw_stats_req->hdr.reserved1,
113
sc->fw_stats_req->hdr.stats_counters_addrs.hi,
114
sc->fw_stats_req->hdr.stats_counters_addrs.lo);
115
116
for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
117
BLOGD(sc, DBG_STATS,
118
"query[%d]\n"
119
" kind %d\n"
120
" index %d\n"
121
" funcID %d\n"
122
" reserved %d\n"
123
" address %x %x\n",
124
i,
125
sc->fw_stats_req->query[i].kind,
126
sc->fw_stats_req->query[i].index,
127
sc->fw_stats_req->query[i].funcID,
128
sc->fw_stats_req->query[i].reserved,
129
sc->fw_stats_req->query[i].address.hi,
130
sc->fw_stats_req->query[i].address.lo);
131
}
132
}
133
134
/*
135
* Post the next statistics ramrod. Protect it with the lock in
136
* order to ensure the strict order between statistics ramrods
137
* (each ramrod has a sequence number passed in a
138
* sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
139
* sent in order).
140
*/
141
/*
 * Post the next statistics ramrod. Protect it with the lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void
bxe_storm_stats_post(struct bxe_softc *sc)
{
    int rc;

    /* cheap unlocked check first; re-checked under the lock below */
    if (!sc->stats_pending) {
        BXE_STATS_LOCK(sc);

        /* another thread may have posted between the check and the lock */
        if (sc->stats_pending) {
            BXE_STATS_UNLOCK(sc);
            return;
        }

        /* stamp this ramrod with the next sequence number (little-endian) */
        sc->fw_stats_req->hdr.drv_stats_counter =
            htole16(sc->stats_counter++);

        BLOGD(sc, DBG_STATS,
              "sending statistics ramrod %d\n",
              le16toh(sc->fw_stats_req->hdr.drv_stats_counter));

        /* adjust the ramrod to include VF queues statistics */
        // XXX bxe_iov_adjust_stats_req(sc);

        bxe_dp_stats(sc);

        /* send FW stats ramrod */
        rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
                         U64_HI(sc->fw_stats_req_mapping),
                         U64_LO(sc->fw_stats_req_mapping),
                         NONE_CONNECTION_TYPE);
        /* only mark pending on success; a failed post keeps the FSM free */
        if (rc == 0) {
            sc->stats_pending = 1;
        }

        BXE_STATS_UNLOCK(sc);
    }
}
178
179
/*
 * Kick off the queued hardware-statistics DMAE transfers.
 *
 * If any executer commands were staged in BXE_SP(sc, dmae[]) (executer_idx
 * != 0), a "loader" DMAE command is built that copies the staged command
 * array into the DMAE engine's command memory and chains execution by
 * writing the next GO register. Otherwise, if only the function stats
 * target exists, the single prebuilt sc->stats_dmae command is posted
 * directly. Completion is signalled by the engine writing DMAE_COMP_VAL
 * into *stats_comp (polled by bxe_stats_comp()).
 */
static void
bxe_hw_stats_post(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);
    int loader_idx;
    uint32_t opcode;

    /* pre-mark complete; cleared again right before a real post below */
    *stats_comp = DMAE_COMP_VAL;
    if (CHIP_REV_IS_SLOW(sc)) {
        /* emulation/FPGA platforms skip HW stats entirely */
        return;
    }

    /* Update MCP's statistics if possible */
    if (sc->func_stx) {
        memcpy(BXE_SP(sc, func_stats), &sc->func_stats,
               sizeof(sc->func_stats));
    }

    /* loader */
    if (sc->executer_idx) {
        loader_idx = PMF_DMAE_C(sc);
        opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                 TRUE, DMAE_COMP_GRC);
        /* loader must not reset the source before the copy completes */
        opcode = bxe_dmae_opcode_clr_src_reset(opcode);

        /* build the loader: copy the staged command array into the
         * DMAE command memory slot right after the loader's own slot */
        memset(dmae, 0, sizeof(struct dmae_cmd));
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
        dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
                              sizeof(struct dmae_cmd) *
                              (loader_idx + 1)) >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct dmae_cmd) >> 2;
        if (CHIP_IS_E1(sc)) {
            /* E1 command slots are one dword shorter */
            dmae->len--;
        }
        /* completion = write 1 to the GO register of the next slot,
         * which chains execution of the copied command */
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, loader_idx);
    } else if (sc->func_stx) {
        /* no staged commands; post the single func-stats command as-is */
        *stats_comp = 0;
        bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
    }
}
228
229
/*
 * Poll for completion of the statistics DMAE (the engine writes
 * DMAE_COMP_VAL into *stats_comp when done). Waits up to ~10ms
 * (10 x 1ms). On timeout, logs an error, latches BXE_ERR_STATS_TO and
 * schedules the slow-path error recovery task.
 *
 * NOTE: always returns 1, even on timeout — callers treat the return
 * value as "done polling", not success/failure.
 */
static int
bxe_stats_comp(struct bxe_softc *sc)
{
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);
    int cnt = 10;

    while (*stats_comp != DMAE_COMP_VAL) {
        if (!cnt) {
            BLOGE(sc, "Timeout waiting for stats finished\n");
            /* kick off deferred error recovery; do not spin forever */
            BXE_SET_ERROR_BIT(sc, BXE_ERR_STATS_TO);
            taskqueue_enqueue_timeout(taskqueue_thread,
                &sc->sp_err_timeout_task, hz/10);
            break;

        }

        cnt--;
        DELAY(1000);
    }

    return (1);
}
251
252
/*
253
* Statistics service functions
254
*/
255
256
/*
 * On becoming PMF, pull the current port statistics out of the shmem
 * port_stx area into the driver's port_stats buffer so the new PMF
 * continues counting from the previous PMF's totals.
 *
 * The transfer is split into two DMAE commands because a single read is
 * capped at DMAE_LEN32_RD_MAX dwords: the first command copies the first
 * DMAE_LEN32_RD_MAX dwords (GRC completion chains to the second), the
 * second copies the remainder and signals PCI completion via stats_comp.
 * Runs synchronously (posts and then polls for completion).
 */
static void
bxe_stats_pmf_update(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    if (sc->devinfo.bc_ver <= 0x06001400) {
        /*
         * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
         * BRB registers while the BRB block is in reset. The DMA transfer
         * below triggers this issue resulting in the DMAE to stop
         * functioning. Skip this initial stats transfer for old bootcode
         * versions <= 6.0.20.
         */
        return;
    }

    /* sanity */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* GRC -> PCI read, completion type filled in per command below */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);

    /* first chunk: max-length read, completes by chaining in GRC */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
    dmae->src_addr_lo = (sc->port.port_stx >> 2);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->len = DMAE_LEN32_RD_MAX;
    dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    /* second chunk: the remainder, completes by writing stats_comp */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
    dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* post and wait for the pair to finish */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
314
315
/*
 * Stage the full set of per-port statistics DMAE commands in the
 * BXE_SP(sc, dmae[]) executer array (later kicked off by
 * bxe_hw_stats_post()). Only the PMF with link up programs these.
 *
 * Staged commands, in order:
 *   1. driver port_stats  -> shmem port_stx   (MCP copy, if present)
 *   2. driver func_stats  -> shmem func_stx   (MCP copy, if present)
 *   3. MAC hardware counters -> driver mac_stats buffer; the register
 *      layout depends on the active MAC (EMAC / BMAC1 / BMAC2 / MSTAT)
 *   4. NIG egress packet counters (not on E3)
 *   5. NIG BRB discard counters, final command with PCI completion
 *      writing DMAE_COMP_VAL into stats_comp
 */
static void
bxe_port_stats_init(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    int port = SC_PORT(sc);
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t mac_addr;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->link_vars.link_up || !sc->port.pmf) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* MCP */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                             TRUE, DMAE_COMP_GRC);

    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    if (sc->func_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* MAC */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                             TRUE, DMAE_COMP_GRC);

    /* EMAC is special: its stats live in three discontiguous register
     * ranges, so three separate reads are needed */
    if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
        mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

        /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_RX_STAT_AC_28 */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
        dmae->src_addr_hi = 0;
        /* single counter lands at its matching offset in emac_stats */
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            rx_stat_falsecarriererrors));
        dmae->len = 1;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) +
                                   offsetof(struct emac_stats,
                                            tx_stat_ifhcoutoctets));
        dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    } else {
        uint32_t tx_src_addr_lo, rx_src_addr_lo;
        uint16_t rx_len, tx_len;

        /* configure the params according to MAC type */
        switch (sc->link_vars.mac_type) {
        case ELINK_MAC_TYPE_BMAC:
            mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
                                NIG_REG_INGRESS_BMAC0_MEM;

            /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
               BIGMAC_REGISTER_TX_STAT_GTBYT */
            if (CHIP_IS_E1x(sc)) {
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                /* "+ 8" includes the final 64-bit counter itself */
                tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                           BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
            } else {
                tx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
                           BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
                rx_src_addr_lo =
                    ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
                rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
                           BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
            }

            break;

        case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
        case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
        default:
            mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
            tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
            rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
            tx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
            rx_len =
                (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
            break;
        }

        /* TX stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = tx_src_addr_lo;
        dmae->src_addr_hi = 0;
        dmae->len = tx_len;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats));
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* RX stats: placed right after the TX block in mac_stats */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_hi = 0;
        dmae->src_addr_lo = rx_src_addr_lo;
        dmae->dst_addr_lo =
            U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->dst_addr_hi =
            U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
        dmae->len = rx_len;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* NIG */
    if (!CHIP_IS_E3(sc)) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt0_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo =
            (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
                    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) +
                                   offsetof(struct nig_stats,
                                            egress_mac_pkt1_lo));
        dmae->len = ((2 * sizeof(uint32_t)) >> 2);
        dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* final command: BRB discard counters, completes via stats_comp */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo =
        (port ? NIG_REG_STAT1_BRB_DISCARD :
                NIG_REG_STAT0_BRB_DISCARD) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats));
    dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats));
    /* skip the last 4 dwords of nig_stats (filled by the commands above) */
    dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2;

    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
542
543
/*
 * Build the single DMAE command used by non-PMF functions: copy the
 * driver's func_stats buffer up to the shmem func_stx area, with PCI
 * completion signalled through stats_comp. The command is stored in
 * sc->stats_dmae and executer_idx is cleared so bxe_hw_stats_post()
 * posts it directly (no loader chain).
 */
static void
bxe_func_stats_init(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae = &sc->stats_dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->func_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;
    memset(dmae, 0, sizeof(struct dmae_cmd));

    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
    dmae->dst_addr_lo = (sc->func_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = (sizeof(struct host_func_stats) >> 2);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
571
572
static void
573
bxe_stats_start(struct bxe_softc *sc)
574
{
575
/*
576
* VFs travel through here as part of the statistics FSM, but no action
577
* is required
578
*/
579
if (IS_VF(sc)) {
580
return;
581
}
582
583
if (sc->port.pmf) {
584
bxe_port_stats_init(sc);
585
}
586
587
else if (sc->func_stx) {
588
bxe_func_stats_init(sc);
589
}
590
591
bxe_hw_stats_post(sc);
592
bxe_storm_stats_post(sc);
593
}
594
595
/*
 * FSM action for becoming PMF: wait out any in-flight stats DMAE,
 * pull the previous PMF's port stats from shmem, then enter the
 * started state. Call order is the FSM contract — do not reorder.
 */
static void
bxe_stats_pmf_start(struct bxe_softc *sc)
{
    bxe_stats_comp(sc);
    bxe_stats_pmf_update(sc);
    bxe_stats_start(sc);
}
602
603
/*
 * FSM action for a stats restart (e.g. after a link change): wait for
 * any in-flight stats DMAE to finish, then re-enter the started state.
 */
static void
bxe_stats_restart(struct bxe_softc *sc)
{
    /*
     * VFs travel through here as part of the statistics FSM, but no action
     * is required
     */
    if (IS_VF(sc)) {
        return;
    }

    bxe_stats_comp(sc);
    bxe_stats_start(sc);
}
617
618
/*
 * Fold the freshly DMAed BMAC hardware counters into the accumulated
 * port stats (pstats) and driver eth stats (estats).
 *
 * NOTE: the UPDATE_STAT64/ADD_64 macros implicitly reference the local
 * names 'new', 'pstats' and 'diff' — do not rename these locals.
 * BMAC1 (E1x) and BMAC2 share field names but are distinct structs, so
 * each branch declares its own 'new' of the right type; BMAC2 adds PFC
 * frame counters (merged with pre-migration values from fw_stats_old).
 */
static void
bxe_bmac_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    if (CHIP_IS_E1x(sc)) {
        struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats);

        /* the macros below will use "bmac1_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
    } else {
        struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats);
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;

        /* the macros below will use "bmac2_stats" type */
        UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
        UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
        UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
        UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
        UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
        UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
        UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
        UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
        UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
        UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
        UPDATE_STAT64(tx_stat_gt127,
                      tx_stat_etherstatspkts65octetsto127octets);
        UPDATE_STAT64(tx_stat_gt255,
                      tx_stat_etherstatspkts128octetsto255octets);
        UPDATE_STAT64(tx_stat_gt511,
                      tx_stat_etherstatspkts256octetsto511octets);
        UPDATE_STAT64(tx_stat_gt1023,
                      tx_stat_etherstatspkts512octetsto1023octets);
        UPDATE_STAT64(tx_stat_gt1518,
                      tx_stat_etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
        UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
        UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
        UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
        UPDATE_STAT64(tx_stat_gterr,
                      tx_stat_dot3statsinternalmactransmiterrors);
        UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

        /* collect PFC stats */
        pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
        pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
        ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
               pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);

        pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
        pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
        ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
               pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
    }

    /* mirror the accumulated pause/PFC counters into the eth stats
     * (mac_stx[1] is the accumulated copy of the MAC stats) */
    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
720
721
/*
 * Fold freshly DMAed MSTAT (E3 UMAC/XMAC) counters into the accumulated
 * port stats and driver eth stats.
 *
 * NOTE: ADD_STAT64/ADD_64 implicitly reference the local names 'new'
 * and 'pstats' — do not rename them. MSTAT counters are clear-on-read,
 * hence ADD_STAT64 (accumulate) rather than UPDATE_STAT64 (diff).
 */
static void
bxe_mstat_stats_update(struct bxe_softc *sc)
{
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats);

    ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
    ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
    ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
    ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
    ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
    ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
    ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
    ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

    /* collect pfc stats */
    ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
           pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
    ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
           pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

    ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
    ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
    ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
    ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
    ADD_STAT64(stats_tx.tx_gt1023,
               tx_stat_etherstatspkts512octetsto1023octets);
    ADD_STAT64(stats_tx.tx_gt1518,
               tx_stat_etherstatspkts1024octetsto1522octets);
    ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);

    ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
    ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
    ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);

    ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
    ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

    /* mirror accumulated values into eth stats; mac_stx[1] is the
     * accumulated copy of the MAC stats */
    estats->etherstatspkts1024octetsto1522octets_hi =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
    estats->etherstatspkts1024octetsto1522octets_lo =
        pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

    /* "over 1522" = sum of the 2047/4095/9216/16383 buckets */
    estats->etherstatspktsover1522octets_hi =
        pstats->mac_stx[1].tx_stat_mac_2047_hi;
    estats->etherstatspktsover1522octets_lo =
        pstats->mac_stx[1].tx_stat_mac_2047_lo;

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_4095_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_4095_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_9216_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_9216_lo);

    ADD_64(estats->etherstatspktsover1522octets_hi,
           pstats->mac_stx[1].tx_stat_mac_16383_hi,
           estats->etherstatspktsover1522octets_lo,
           pstats->mac_stx[1].tx_stat_mac_16383_lo);

    estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
    estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;

    estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
    estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;

    estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
    estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
    estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
    estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
}
798
799
/*
 * Fold freshly DMAed EMAC counters into the accumulated port stats and
 * driver eth stats.
 *
 * NOTE: UPDATE_EXTEND_STAT implicitly references the local names 'new',
 * 'pstats' and 'estats' — do not rename them. EMAC counters are 32-bit,
 * so the macro extends them into the 64-bit hi/lo accumulators.
 */
static void
bxe_emac_stats_update(struct bxe_softc *sc)
{
    struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;

    UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
    UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
    UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
    UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
    UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
    UPDATE_EXTEND_STAT(tx_stat_outxonsent);
    UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
    UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
    UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

    /* pause totals = XON + XOFF frames; mac_stx[1] is the accumulated
     * copy of the MAC stats */
    estats->pause_frames_received_hi =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
    estats->pause_frames_received_lo =
        pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
    ADD_64(estats->pause_frames_received_hi,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
           estats->pause_frames_received_lo,
           pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

    estats->pause_frames_sent_hi =
        pstats->mac_stx[1].tx_stat_outxonsent_hi;
    estats->pause_frames_sent_lo =
        pstats->mac_stx[1].tx_stat_outxonsent_lo;
    ADD_64(estats->pause_frames_sent_hi,
           pstats->mac_stx[1].tx_stat_outxoffsent_hi,
           estats->pause_frames_sent_lo,
           pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
856
857
/*
 * Process a completed hardware statistics DMAE: dispatch to the
 * MAC-type-specific update routine, then fold in the NIG counters and
 * publish the accumulated MAC stats into sc->eth_stats.
 *
 * Returns 0 on success, -1 when no MAC is active (stats cannot be
 * attributed). An unknown MAC type only logs an error and then falls
 * through to the NIG update — NOTE(review): this mirrors upstream, but
 * it means eth MAC stats are stale in that (unreachable) case.
 *
 * NOTE: UPDATE_STAT64_NIG / ADD_EXTEND_64 implicitly reference the
 * local names 'new', 'old', 'diff' and 'estats' — do not rename them.
 */
static int
bxe_hw_stats_update(struct bxe_softc *sc)
{
    struct nig_stats *new = BXE_SP(sc, nig_stats);
    struct nig_stats *old = &(sc->port.old_nig_stats);
    struct host_port_stats *pstats = BXE_SP(sc, port_stats);
    struct bxe_eth_stats *estats = &sc->eth_stats;
    uint32_t lpi_reg, nig_timer_max;
    struct {
        uint32_t lo;
        uint32_t hi;
    } diff;

    switch (sc->link_vars.mac_type) {
    case ELINK_MAC_TYPE_BMAC:
        bxe_bmac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_EMAC:
        bxe_emac_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_UMAC:
    case ELINK_MAC_TYPE_XMAC:
        bxe_mstat_stats_update(sc);
        break;

    case ELINK_MAC_TYPE_NONE: /* unreached */
        BLOGD(sc, DBG_STATS,
              "stats updated by DMAE but no MAC active\n");
        return (-1);

    default: /* unreached */
        BLOGE(sc, "stats update failed, unknown MAC type\n");
    }

    /* NIG counters are free-running; accumulate the delta vs. the
     * previous snapshot */
    ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
                  new->brb_discard - old->brb_discard);
    ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
                  new->brb_truncate - old->brb_truncate);

    if (!CHIP_IS_E3(sc)) {
        UPDATE_STAT64_NIG(egress_mac_pkt0,
                          etherstatspkts1024octetsto1522octets);
        UPDATE_STAT64_NIG(egress_mac_pkt1,
                          etherstatspktsover1522octets);
    }

    /* remember this snapshot for the next delta */
    memcpy(old, new, sizeof(struct nig_stats));

    /* publish the accumulated MAC stats block into eth_stats; the two
     * layouts are kept field-for-field compatible (struct mac_stx) */
    memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
           sizeof(struct mac_stx));
    estats->brb_drop_hi = pstats->brb_drop_hi;
    estats->brb_drop_lo = pstats->brb_drop_lo;

    pstats->host_port_stats_counter++;

    if (CHIP_IS_E3(sc)) {
        /* EEE low-power-idle entry count, per port */
        lpi_reg = (SC_PORT(sc)) ?
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
                      MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
        estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
    }

    if (!BXE_NOMCP(sc)) {
        nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
        if (nig_timer_max != estats->nig_timer_max) {
            estats->nig_timer_max = nig_timer_max;
            /*NOTE: not setting error bit */
            BLOGE(sc, "invalid NIG timer max (%u)\n",
                  estats->nig_timer_max);
        }
    }

    return (0);
}
933
934
static int
935
bxe_storm_stats_validate_counters(struct bxe_softc *sc)
936
{
937
struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
938
uint16_t cur_stats_counter;
939
940
/*
941
* Make sure we use the value of the counter
942
* used for sending the last stats ramrod.
943
*/
944
BXE_STATS_LOCK(sc);
945
cur_stats_counter = (sc->stats_counter - 1);
946
BXE_STATS_UNLOCK(sc);
947
948
/* are storm stats valid? */
949
if (le16toh(counters->xstats_counter) != cur_stats_counter) {
950
BLOGD(sc, DBG_STATS,
951
"stats not updated by xstorm, "
952
"counter 0x%x != stats_counter 0x%x\n",
953
le16toh(counters->xstats_counter), sc->stats_counter);
954
return (-EAGAIN);
955
}
956
957
if (le16toh(counters->ustats_counter) != cur_stats_counter) {
958
BLOGD(sc, DBG_STATS,
959
"stats not updated by ustorm, "
960
"counter 0x%x != stats_counter 0x%x\n",
961
le16toh(counters->ustats_counter), sc->stats_counter);
962
return (-EAGAIN);
963
}
964
965
if (le16toh(counters->cstats_counter) != cur_stats_counter) {
966
BLOGD(sc, DBG_STATS,
967
"stats not updated by cstorm, "
968
"counter 0x%x != stats_counter 0x%x\n",
969
le16toh(counters->cstats_counter), sc->stats_counter);
970
return (-EAGAIN);
971
}
972
973
if (le16toh(counters->tstats_counter) != cur_stats_counter) {
974
BLOGD(sc, DBG_STATS,
975
"stats not updated by tstorm, "
976
"counter 0x%x != stats_counter 0x%x\n",
977
le16toh(counters->tstats_counter), sc->stats_counter);
978
return (-EAGAIN);
979
}
980
981
return (0);
982
}
983
984
/*
 * Fold the firmware (storm) statistics into the driver's software
 * counters: per-queue qstats, the aggregate eth_stats and the
 * host_func_stats block.
 *
 * Returns 0 on success, or -EAGAIN when the storm counters do not yet
 * reflect the last statistics ramrod (PF only; the caller retries).
 */
static int
bxe_storm_stats_update(struct bxe_softc *sc)
{
    /* tport is consumed by the UPDATE_FW_STAT() macros below */
    struct tstorm_per_port_stats *tport =
        &sc->fw_stats_data->port.tstorm_port_statistics;
    struct tstorm_per_pf_stats *tfunc =
        &sc->fw_stats_data->pf.tstorm_pf_statistics;
    struct host_func_stats *fstats = &sc->func_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;
    struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old;
    int i;

    /* vfs stat counter is managed by pf */
    if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) {
        return (-EAGAIN);
    }

    /* error bytes are recomputed from scratch on every pass */
    estats->error_bytes_received_hi = 0;
    estats->error_bytes_received_lo = 0;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct tstorm_per_queue_stats *tclient =
            &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
        struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
        struct ustorm_per_queue_stats *uclient =
            &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
        struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
        struct xstorm_per_queue_stats *xclient =
            &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
        struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        /* scratch variable referenced by the UPDATE_EXTEND_* macros */
        uint32_t diff;

        BLOGD(sc, DBG_STATS,
              "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n",
              i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
              xclient->mcast_pkts_sent);

        BLOGD(sc, DBG_STATS, "---------------\n");

        /* RX byte counters, one per traffic class */
        UPDATE_QSTAT(tclient->rcv_bcast_bytes,
                     total_broadcast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_mcast_bytes,
                     total_multicast_bytes_received);
        UPDATE_QSTAT(tclient->rcv_ucast_bytes,
                     total_unicast_bytes_received);

        /*
         * sum to total_bytes_received all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_received_hi =
            qstats->total_broadcast_bytes_received_hi;
        qstats->total_bytes_received_lo =
            qstats->total_broadcast_bytes_received_lo;

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_multicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_multicast_bytes_received_lo);

        ADD_64(qstats->total_bytes_received_hi,
               qstats->total_unicast_bytes_received_hi,
               qstats->total_bytes_received_lo,
               qstats->total_unicast_bytes_received_lo);

        /* "valid" starts as the total; no-buff drops are subtracted below */
        qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
        qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;

        /* RX packet counters */
        UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
        UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
        UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
                              etherstatsoverrsizepkts, 32);
        UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);

        /* packets dropped for lack of buffers are not "received" */
        SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
        SUB_EXTEND_USTAT(mcast_no_buff_pkts,
                         total_multicast_packets_received);
        SUB_EXTEND_USTAT(bcast_no_buff_pkts,
                         total_broadcast_packets_received);
        UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
        UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

        /* TX byte counters, one per traffic class */
        UPDATE_QSTAT(xclient->bcast_bytes_sent,
                     total_broadcast_bytes_transmitted);
        UPDATE_QSTAT(xclient->mcast_bytes_sent,
                     total_multicast_bytes_transmitted);
        UPDATE_QSTAT(xclient->ucast_bytes_sent,
                     total_unicast_bytes_transmitted);

        /*
         * sum to total_bytes_transmitted all
         * unicast/multicast/broadcast
         */
        qstats->total_bytes_transmitted_hi =
            qstats->total_unicast_bytes_transmitted_hi;
        qstats->total_bytes_transmitted_lo =
            qstats->total_unicast_bytes_transmitted_lo;

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(qstats->total_bytes_transmitted_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               qstats->total_bytes_transmitted_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        /* TX packet counters */
        UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
                            total_unicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
                            total_multicast_packets_transmitted);
        UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
                            total_broadcast_packets_transmitted);

        /* RX discard counters */
        UPDATE_EXTEND_TSTAT(checksum_discard,
                            total_packets_received_checksum_discarded);
        UPDATE_EXTEND_TSTAT(ttl0_discard,
                            total_packets_received_ttl0_discarded);

        UPDATE_EXTEND_XSTAT(error_drop_pkts,
                            total_transmitted_dropped_packets_error);

        /* TPA aggregations completed */
        UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
        /* Number of network frames aggregated by TPA */
        UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames);
        /* Total number of bytes in completed TPA aggregations */
        UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

        UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

        /* roll this queue's deltas into the per-function totals */
        UPDATE_FSTAT_QSTAT(total_bytes_received);
        UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
        UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
        UPDATE_FSTAT_QSTAT(valid_bytes_received);
    }

    /* bad octets (from the MAC stats) still count as received bytes */
    ADD_64(estats->total_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->total_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    /* PF-level error bytes reported by the tstorm (little-endian) */
    ADD_64_LE(estats->total_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->total_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    ADD_64_LE(estats->error_bytes_received_hi,
              tfunc->rcv_error_bytes.hi,
              estats->error_bytes_received_lo,
              tfunc->rcv_error_bytes.lo);

    UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

    ADD_64(estats->error_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->error_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    /* port-level firmware discard counters are only valid on the PMF */
    if (sc->port.pmf) {
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        UPDATE_FW_STAT(mac_filter_discard);
        UPDATE_FW_STAT(mf_tag_discard);
        UPDATE_FW_STAT(brb_truncate_discard);
        UPDATE_FW_STAT(mac_discard);
    }

    /* mark the function stats block consistent (start == end) */
    fstats->host_func_stats_start = ++fstats->host_func_stats_end;

    sc->stats_pending = 0;

    return (0);
}
static void
1171
bxe_net_stats_update(struct bxe_softc *sc)
1172
{
1173
1174
for (int i = 0; i < sc->num_queues; i++)
1175
if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS,
1176
le32toh(sc->fp[i].old_tclient.checksum_discard));
1177
}
1178
1179
/*
 * if_get_counter handler: report interface-level counters to the
 * network stack by folding the driver's 64-bit (hi/lo) eth_stats.
 *
 * Returns the requested counter value, or the stack's default for
 * counters this driver does not track itself.
 */
uint64_t
bxe_get_counter(if_t ifp, ift_counter cnt)
{
    struct bxe_softc *sc;
    struct bxe_eth_stats *estats;

    sc = if_getsoftc(ifp);
    estats = &sc->eth_stats;

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        /* RX packets: sum of all three traffic classes */
        return (bxe_hilo(&estats->total_unicast_packets_received_hi) +
                bxe_hilo(&estats->total_multicast_packets_received_hi) +
                bxe_hilo(&estats->total_broadcast_packets_received_hi));
    case IFCOUNTER_OPACKETS:
        /* TX packets: sum of all three traffic classes */
        return (bxe_hilo(&estats->total_unicast_packets_transmitted_hi) +
                bxe_hilo(&estats->total_multicast_packets_transmitted_hi) +
                bxe_hilo(&estats->total_broadcast_packets_transmitted_hi));
    case IFCOUNTER_IBYTES:
        return (bxe_hilo(&estats->total_bytes_received_hi));
    case IFCOUNTER_OBYTES:
        return (bxe_hilo(&estats->total_bytes_transmitted_hi));
    case IFCOUNTER_IERRORS:
        /* RX errors: runts, giants, BRB drops/truncates, FCS,
         * alignment and no-buffer discards */
        return (bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
                bxe_hilo(&estats->etherstatsoverrsizepkts_hi) +
                bxe_hilo(&estats->brb_drop_hi) +
                bxe_hilo(&estats->brb_truncate_hi) +
                bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) +
                bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) +
                bxe_hilo(&estats->no_buff_discard_hi));
    case IFCOUNTER_OERRORS:
        return (bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) +
                bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi));
    case IFCOUNTER_IMCASTS:
        return (bxe_hilo(&estats->total_multicast_packets_received_hi));
    case IFCOUNTER_COLLISIONS:
        return (bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) +
                bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
                bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi));
    default:
        /* anything else is handled generically by the stack */
        return (if_get_counter_default(ifp, cnt));
    }
}
/*
 * Fold the per-queue driver-maintained (software) counters into the
 * aggregate eth_stats, using the per-queue "old" snapshots kept in
 * eth_q_stats_old (see the UPDATE_ESTAT_QSTAT macro).
 */
static void
bxe_drv_stats_update(struct bxe_softc *sc)
{
    struct bxe_eth_stats *estats = &sc->eth_stats;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;

        /* RX path counters */
        UPDATE_ESTAT_QSTAT(rx_calls);
        UPDATE_ESTAT_QSTAT(rx_pkts);
        UPDATE_ESTAT_QSTAT(rx_tpa_pkts);
        UPDATE_ESTAT_QSTAT(rx_erroneous_jumbo_sge_pkts);
        UPDATE_ESTAT_QSTAT(rx_bxe_service_rxsgl);
        UPDATE_ESTAT_QSTAT(rx_jumbo_sge_pkts);
        UPDATE_ESTAT_QSTAT(rx_soft_errors);
        UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
        UPDATE_ESTAT_QSTAT(rx_budget_reached);

        /* TX path counters */
        UPDATE_ESTAT_QSTAT(tx_pkts);
        UPDATE_ESTAT_QSTAT(tx_soft_errors);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso);
        UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits);
        UPDATE_ESTAT_QSTAT(tx_encap_failures);
        UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
        UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
        UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
        UPDATE_ESTAT_QSTAT(tx_window_violation_std);
        UPDATE_ESTAT_QSTAT(tx_window_violation_tso);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6);
        //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp);
        UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
        UPDATE_ESTAT_QSTAT(tx_frames_deferred);
        UPDATE_ESTAT_QSTAT(tx_queue_xoff);

        /* mbuf driver statistics */
        UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
        UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed);
        UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed);

        /* track the number of allocated mbufs */
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_sge);
        UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa);
    }
}
static uint8_t
1283
bxe_edebug_stats_stopped(struct bxe_softc *sc)
1284
{
1285
uint32_t val;
1286
1287
if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1288
val = SHMEM2_RD(sc, edebug_driver_if[1]);
1289
1290
if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) {
1291
return (TRUE);
1292
}
1293
}
1294
1295
return (FALSE);
1296
}
1297
1298
/*
 * Periodic statistics update (the STATS_EVENT_UPDATE action when
 * enabled).  Collects HW, storm, net and driver stats, then re-posts
 * the DMAE/ramrod requests for the next cycle (PF only).
 */
static void
bxe_stats_update(struct bxe_softc *sc)
{
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* honor an external request to stop stats collection */
    if (bxe_edebug_stats_stopped(sc)) {
        return;
    }

    if (IS_PF(sc)) {
        /* previous DMAE transfer not finished yet; try again later */
        if (*stats_comp != DMAE_COMP_VAL) {
            return;
        }

        /* only the PMF collects HW (MAC/NIG) statistics */
        if (sc->port.pmf) {
            bxe_hw_stats_update(sc);
        }

        if (bxe_storm_stats_update(sc)) {
            /* storm stats stale; after 3 consecutive misses while the
             * interface is up, schedule an error-recovery reset */
            if (sc->stats_pending++ == 3) {
                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                    BLOGE(sc, "Storm stats not updated for 3 times, resetting\n");
                    BXE_SET_ERROR_BIT(sc, BXE_ERR_STATS_TO);
                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                }
            }
            return;
        }
    } else {
        /*
         * VF doesn't collect HW statistics, and doesn't get completions,
         * performs only update.
         */
        bxe_storm_stats_update(sc);
    }

    bxe_net_stats_update(sc);
    bxe_drv_stats_update(sc);

    /* vf is done */
    if (IS_VF(sc)) {
        return;
    }

    /* kick off the next collection cycle */
    bxe_hw_stats_post(sc);
    bxe_storm_stats_post(sc);
}
/*
 * Build the DMAE command(s) that copy the final port and function
 * statistics from host memory out to their shared-memory (GRC)
 * locations when statistics collection is being stopped.
 *
 * Note: this only programs the commands; the caller issues them via
 * bxe_hw_stats_post() and waits with bxe_stats_comp().
 */
static void
bxe_port_stats_stop(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    uint32_t opcode;
    int loader_idx = PMF_DMAE_C(sc);
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    sc->executer_idx = 0;

    /* host (PCI) -> shared memory (GRC), completion type added below */
    opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);

    if (sc->port.port_stx) {
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);

        /* if a function-stats command follows, chain via GRC completion;
         * otherwise this command completes to the host directly */
        if (sc->func_stx) {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
        } else {
            dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        }

        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
        dmae->dst_addr_lo = sc->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = bxe_get_port_stats_dma_len(sc);
        if (sc->func_stx) {
            /* completion triggers the loader for the next command */
            dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
            dmae->comp_addr_hi = 0;
            dmae->comp_val = 1;
        } else {
            /* completion writes DMAE_COMP_VAL into stats_comp */
            dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
            dmae->comp_val = DMAE_COMP_VAL;

            *stats_comp = 0;
        }
    }

    if (sc->func_stx) {
        /* copy out the function statistics block as well */
        dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
        dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
        dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats));
        dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats));
        dmae->dst_addr_lo = (sc->func_stx >> 2);
        dmae->dst_addr_hi = 0;
        dmae->len = (sizeof(struct host_func_stats) >> 2);
        dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
    }
}
static void
1403
bxe_stats_stop(struct bxe_softc *sc)
1404
{
1405
uint8_t update = FALSE;
1406
1407
bxe_stats_comp(sc);
1408
1409
if (sc->port.pmf) {
1410
update = bxe_hw_stats_update(sc) == 0;
1411
}
1412
1413
update |= bxe_storm_stats_update(sc) == 0;
1414
1415
if (update) {
1416
bxe_net_stats_update(sc);
1417
1418
if (sc->port.pmf) {
1419
bxe_port_stats_stop(sc);
1420
}
1421
1422
bxe_hw_stats_post(sc);
1423
bxe_stats_comp(sc);
1424
}
1425
}
1426
1427
/*
 * No-op action for state-machine transitions that require no work.
 */
static void
bxe_stats_do_nothing(struct bxe_softc *sc)
{
}
/*
 * Statistics state machine, indexed as [current state][event] (see
 * bxe_stats_handle()).  Each entry names the action to run and the
 * state to move to.  Row order follows enum bxe_stats_state and
 * column order follows enum bxe_stats_event.
 */
static const struct {
    void (*action)(struct bxe_softc *sc); /* transition action */
    enum bxe_stats_state next_state;      /* state after the event */
} bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
    {
    /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED },
    /* LINK_UP */ { bxe_stats_start, STATS_STATE_ENABLED },
    /* UPDATE */ { bxe_stats_do_nothing, STATS_STATE_DISABLED },
    /* STOP */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }
    },
    {
    /* ENABLED PMF */ { bxe_stats_pmf_start, STATS_STATE_ENABLED },
    /* LINK_UP */ { bxe_stats_restart, STATS_STATE_ENABLED },
    /* UPDATE */ { bxe_stats_update, STATS_STATE_ENABLED },
    /* STOP */ { bxe_stats_stop, STATS_STATE_DISABLED }
    }
};
void bxe_stats_handle(struct bxe_softc *sc,
1452
enum bxe_stats_event event)
1453
{
1454
enum bxe_stats_state state;
1455
1456
if (__predict_false(sc->panic)) {
1457
return;
1458
}
1459
1460
BXE_STATS_LOCK(sc);
1461
state = sc->stats_state;
1462
sc->stats_state = bxe_stats_stm[state][event].next_state;
1463
BXE_STATS_UNLOCK(sc);
1464
1465
bxe_stats_stm[state][event].action(sc);
1466
1467
if (event != STATS_EVENT_UPDATE) {
1468
BLOGD(sc, DBG_STATS,
1469
"state %d -> event %d -> state %d\n",
1470
state, event, sc->stats_state);
1471
}
1472
}
1473
1474
/*
 * Write the initial (baseline) port statistics block out to its
 * shared-memory location via DMAE.  Only meaningful on the PMF with a
 * valid port_stx address; runs the transfer synchronously.
 */
static void
bxe_port_stats_base_init(struct bxe_softc *sc)
{
    struct dmae_cmd *dmae;
    uint32_t *stats_comp = BXE_SP(sc, stats_comp);

    /* sanity */
    if (!sc->port.pmf || !sc->port.port_stx) {
        BLOGE(sc, "BUG!\n");
        return;
    }

    sc->executer_idx = 0;

    /* single host -> GRC copy of the port stats block */
    dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
    dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
                                   TRUE, DMAE_COMP_PCI);
    dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats));
    dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats));
    dmae->dst_addr_lo = (sc->port.port_stx >> 2);
    dmae->dst_addr_hi = 0;
    dmae->len = bxe_get_port_stats_dma_len(sc);
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    /* arm the completion word, fire, and wait */
    *stats_comp = 0;
    bxe_hw_stats_post(sc);
    bxe_stats_comp(sc);
}
/*
 * This function will prepare the statistics ramrod data the way
 * we will only have to increment the statistics counter and
 * send the ramrod each time we have to.
 */
static void
bxe_prep_fw_stats_req(struct bxe_softc *sc)
{
    int i;
    int first_queue_query_index;
    struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
    bus_addr_t cur_data_offset;
    struct stats_query_entry *cur_query_entry;

    stats_hdr->cmd_num = sc->fw_stats_num;
    stats_hdr->drv_stats_counter = 0;

    /*
     * The storm_counters struct contains the counters of completed
     * statistics requests per storm which are incremented by FW
     * each time it completes handling a statistics ramrod. We will
     * check these counters in the timer handler and discard a
     * (statistics) ramrod completion.
     */
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, storm_counters));

    stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
    stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));

    /*
     * Prepare the first stats ramrod (will be completed with
     * the counters equal to zero) - init counters to something different.
     */
    memset(&sc->fw_stats_data->storm_counters, 0xff,
           sizeof(struct stats_counter));

    /**** Port FW statistics data ****/
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, port));

    cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX];

    cur_query_entry->kind = STATS_TYPE_PORT;
    /* For port query index is a DONT CARE */
    cur_query_entry->index = SC_PORT(sc);
    /* For port query funcID is a DONT CARE */
    cur_query_entry->funcID = htole16(SC_FUNC(sc));
    cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
    cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

    /**** PF FW statistics data ****/
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, pf));

    cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX];

    cur_query_entry->kind = STATS_TYPE_PF;
    /* For PF query index is a DONT CARE */
    cur_query_entry->index = SC_PORT(sc);
    cur_query_entry->funcID = htole16(SC_FUNC(sc));
    cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
    cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

    /**** Clients' queries ****/
    cur_data_offset = (sc->fw_stats_data_mapping +
                       offsetof(struct bxe_fw_stats_data, queue_stats));

    /*
     * First queue query index depends whether FCoE offloaded request will
     * be included in the ramrod
     */
    first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1);

    /* one query entry per RSS queue, all pointing into queue_stats[] */
    for (i = 0; i < sc->num_queues; i++) {
        cur_query_entry =
            &sc->fw_stats_req->query[first_queue_query_index + i];

        cur_query_entry->kind = STATS_TYPE_QUEUE;
        cur_query_entry->index = bxe_stats_id(&sc->fp[i]);
        cur_query_entry->funcID = htole16(SC_FUNC(sc));
        cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));

        cur_data_offset += sizeof(struct per_queue_stats);
    }
}
/*
 * One-time (and post-reset) statistics initialization: discover the
 * shared-memory statistics addresses, snapshot the NIG baseline
 * counters, zero the software counters, and prepare the firmware
 * statistics ramrod.  sc->stats_init distinguishes the very first call
 * from a re-init after link/PMF changes.
 */
void
bxe_stats_init(struct bxe_softc *sc)
{
    int /*abs*/port = SC_PORT(sc);
    int mb_idx = SC_FW_MB_IDX(sc);
    int i;

    sc->stats_pending = 0;
    sc->executer_idx = 0;
    sc->stats_counter = 0;

    /* port and func stats for management */
    if (!BXE_NOMCP(sc)) {
        sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
        sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
    } else {
        sc->port.port_stx = 0;
        sc->func_stx = 0;
    }

    BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n",
          sc->port.port_stx, sc->func_stx);

    /* pmf should retrieve port statistics from SP on a non-init */
    if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) {
        bxe_stats_handle(sc, STATS_EVENT_PMF);
    }

    port = SC_PORT(sc);
    /* port stats: take a baseline snapshot of the NIG counters */
    memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
    sc->port.old_nig_stats.brb_discard =
        REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
    sc->port.old_nig_stats.brb_truncate =
        REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
    if (!CHIP_IS_E3(sc)) {
        /* egress MAC packet counters only exist before E3 */
        REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
                    &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2);
        REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
                    &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
    }

    /* function stats */
    for (i = 0; i < sc->num_queues; i++) {
        memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
        memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
        memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
        if (sc->stats_init) {
            /* first init: clear the per-queue software counters too */
            memset(&sc->fp[i].eth_q_stats, 0,
                   sizeof(sc->fp[i].eth_q_stats));
            memset(&sc->fp[i].eth_q_stats_old, 0,
                   sizeof(sc->fp[i].eth_q_stats_old));
        }
    }

    /* prepare statistics ramrod data */
    bxe_prep_fw_stats_req(sc);

    if (sc->stats_init) {
        memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
        memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
        memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
        memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
        memset(&sc->func_stats, 0, sizeof(sc->func_stats));

        /* Clean SP from previous statistics */
        if (sc->func_stx) {
            memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
            bxe_func_stats_init(sc);
            bxe_hw_stats_post(sc);
            bxe_stats_comp(sc);
        }
    }

    sc->stats_state = STATS_STATE_DISABLED;

    if (sc->port.pmf && sc->port.port_stx) {
        bxe_port_stats_base_init(sc);
    }

    /* mark the end of statistics initialization */
    sc->stats_init = FALSE;
}
/*
 * Snapshot counters that must survive a function reset: fold the
 * current per-queue 64-bit totals into the eth_q_stats_old blocks and,
 * on the PMF, preserve the port firmware statistics.
 */
void
bxe_save_statistics(struct bxe_softc *sc)
{
    int i;

    /* save queue statistics */
    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp = &sc->fp[i];
        struct bxe_eth_q_stats *qstats = &fp->eth_q_stats;
        struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;

        UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
        UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
        UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
        UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
    }

    /* store port firmware statistics */
    if (sc->port.pmf) {
        struct bxe_eth_stats *estats = &sc->eth_stats;
        struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old;
        struct host_port_stats *pstats = BXE_SP(sc, port_stats);

        fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
        fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
        fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
        fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;

        /* multi-function discard counters are only kept in MF mode */
        if (IS_MF(sc)) {
            UPDATE_FW_STAT_OLD(mac_filter_discard);
            UPDATE_FW_STAT_OLD(mf_tag_discard);
            UPDATE_FW_STAT_OLD(brb_truncate_discard);
            UPDATE_FW_STAT_OLD(mac_discard);
        }
    }
}
/*
 * Aggregate the per-queue statistics into an AFEX (NIV) afex_stats
 * report for the management firmware.  void_afex_stats points to a
 * struct afex_stats supplied by the caller; it is zeroed here and then
 * filled by summing every queue's counters.  stats_type selects the
 * report flavor (port-level counters are folded in only for
 * VICSTATST_UIF_INDEX on the PMF).
 */
void
bxe_afex_collect_stats(struct bxe_softc *sc,
                       void *void_afex_stats,
                       uint32_t stats_type)
{
    int i;
    struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
    struct bxe_eth_stats *estats = &sc->eth_stats;

    memset(afex_stats, 0, sizeof(struct afex_stats));

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;

        /* RX bytes per traffic class */
        ADD_64(afex_stats->rx_unicast_bytes_hi,
               qstats->total_unicast_bytes_received_hi,
               afex_stats->rx_unicast_bytes_lo,
               qstats->total_unicast_bytes_received_lo);

        ADD_64(afex_stats->rx_broadcast_bytes_hi,
               qstats->total_broadcast_bytes_received_hi,
               afex_stats->rx_broadcast_bytes_lo,
               qstats->total_broadcast_bytes_received_lo);

        ADD_64(afex_stats->rx_multicast_bytes_hi,
               qstats->total_multicast_bytes_received_hi,
               afex_stats->rx_multicast_bytes_lo,
               qstats->total_multicast_bytes_received_lo);

        /* RX frames per traffic class */
        ADD_64(afex_stats->rx_unicast_frames_hi,
               qstats->total_unicast_packets_received_hi,
               afex_stats->rx_unicast_frames_lo,
               qstats->total_unicast_packets_received_lo);

        ADD_64(afex_stats->rx_broadcast_frames_hi,
               qstats->total_broadcast_packets_received_hi,
               afex_stats->rx_broadcast_frames_lo,
               qstats->total_broadcast_packets_received_lo);

        ADD_64(afex_stats->rx_multicast_frames_hi,
               qstats->total_multicast_packets_received_hi,
               afex_stats->rx_multicast_frames_lo,
               qstats->total_multicast_packets_received_lo);

        /*
         * sum to rx_frames_discarded all discarded
         * packets due to size, ttl0 and checksum
         */
        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->total_packets_received_checksum_discarded_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->total_packets_received_checksum_discarded_lo);

        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->total_packets_received_ttl0_discarded_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->total_packets_received_ttl0_discarded_lo);

        ADD_64(afex_stats->rx_frames_discarded_hi,
               qstats->etherstatsoverrsizepkts_hi,
               afex_stats->rx_frames_discarded_lo,
               qstats->etherstatsoverrsizepkts_lo);

        /* no-buffer discards count as dropped, not discarded */
        ADD_64(afex_stats->rx_frames_dropped_hi,
               qstats->no_buff_discard_hi,
               afex_stats->rx_frames_dropped_lo,
               qstats->no_buff_discard_lo);

        /* TX bytes per traffic class */
        ADD_64(afex_stats->tx_unicast_bytes_hi,
               qstats->total_unicast_bytes_transmitted_hi,
               afex_stats->tx_unicast_bytes_lo,
               qstats->total_unicast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_broadcast_bytes_hi,
               qstats->total_broadcast_bytes_transmitted_hi,
               afex_stats->tx_broadcast_bytes_lo,
               qstats->total_broadcast_bytes_transmitted_lo);

        ADD_64(afex_stats->tx_multicast_bytes_hi,
               qstats->total_multicast_bytes_transmitted_hi,
               afex_stats->tx_multicast_bytes_lo,
               qstats->total_multicast_bytes_transmitted_lo);

        /* TX frames per traffic class */
        ADD_64(afex_stats->tx_unicast_frames_hi,
               qstats->total_unicast_packets_transmitted_hi,
               afex_stats->tx_unicast_frames_lo,
               qstats->total_unicast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_broadcast_frames_hi,
               qstats->total_broadcast_packets_transmitted_hi,
               afex_stats->tx_broadcast_frames_lo,
               qstats->total_broadcast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_multicast_frames_hi,
               qstats->total_multicast_packets_transmitted_hi,
               afex_stats->tx_multicast_frames_lo,
               qstats->total_multicast_packets_transmitted_lo);

        ADD_64(afex_stats->tx_frames_dropped_hi,
               qstats->total_transmitted_dropped_packets_error_hi,
               afex_stats->tx_frames_dropped_lo,
               qstats->total_transmitted_dropped_packets_error_lo);
    }

    /*
     * If port stats are requested, add them to the PMF
     * stats, as anyway they will be accumulated by the
     * MCP before sent to the switch
     */
    if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
        ADD_64(afex_stats->rx_frames_dropped_hi,
               0,
               afex_stats->rx_frames_dropped_lo,
               estats->mac_filter_discard);
        ADD_64(afex_stats->rx_frames_dropped_hi,
               0,
               afex_stats->rx_frames_dropped_lo,
               estats->brb_truncate_discard);
        ADD_64(afex_stats->rx_frames_discarded_hi,
               0,
               afex_stats->rx_frames_discarded_lo,
               estats->mac_discard);
    }
}