GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/bfe/if_bfe.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003 Stuart Walsh<[email protected]>
 * and Duncan Barclay<[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/bfe/if_bfereg.h>

MODULE_DEPEND(bfe, pci, 1, 1, 1);
MODULE_DEPEND(bfe, ether, 1, 1, 1);
MODULE_DEPEND(bfe, miibus, 1, 1, 1);

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BFE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bfe_type bfe_devs[] = {
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401,
	    "Broadcom BCM4401 Fast Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0,
	    "Broadcom BCM4401-B0 Fast Ethernet" },
	{ 0, 0, NULL }
};

static int bfe_probe(device_t);
static int bfe_attach(device_t);
static int bfe_detach(device_t);
static int bfe_suspend(device_t);
static int bfe_resume(device_t);
static void bfe_release_resources(struct bfe_softc *);
static void bfe_intr(void *);
static int bfe_encap(struct bfe_softc *, struct mbuf **);
static void bfe_start(if_t);
static void bfe_start_locked(if_t);
static int bfe_ioctl(if_t, u_long, caddr_t);
static void bfe_init(void *);
static void bfe_init_locked(void *);
static void bfe_stop(struct bfe_softc *);
static void bfe_watchdog(struct bfe_softc *);
static int bfe_shutdown(device_t);
static void bfe_tick(void *);
static void bfe_txeof(struct bfe_softc *);
static void bfe_rxeof(struct bfe_softc *);
static void bfe_set_rx_mode(struct bfe_softc *);
static int bfe_list_rx_init(struct bfe_softc *);
static void bfe_list_tx_init(struct bfe_softc *);
static void bfe_discard_buf(struct bfe_softc *, int);
static int bfe_list_newbuf(struct bfe_softc *, int);
static void bfe_rx_ring_free(struct bfe_softc *);

static void bfe_pci_setup(struct bfe_softc *, u_int32_t);
static int bfe_ifmedia_upd(if_t);
static void bfe_ifmedia_sts(if_t, struct ifmediareq *);
static int bfe_miibus_readreg(device_t, int, int);
static int bfe_miibus_writereg(device_t, int, int, int);
static void bfe_miibus_statchg(device_t);
static int bfe_wait_bit(struct bfe_softc *, u_int32_t, u_int32_t,
	    u_long, const int);
static void bfe_get_config(struct bfe_softc *sc);
static void bfe_read_eeprom(struct bfe_softc *, u_int8_t *);
static void bfe_stats_update(struct bfe_softc *);
static void bfe_clear_stats(struct bfe_softc *);
static int bfe_readphy(struct bfe_softc *, u_int32_t, u_int32_t*);
static int bfe_writephy(struct bfe_softc *, u_int32_t, u_int32_t);
static int bfe_resetphy(struct bfe_softc *);
static int bfe_setupphy(struct bfe_softc *);
static void bfe_chip_reset(struct bfe_softc *);
static void bfe_chip_halt(struct bfe_softc *);
static void bfe_core_reset(struct bfe_softc *);
static void bfe_core_disable(struct bfe_softc *);
static int bfe_dma_alloc(struct bfe_softc *);
static void bfe_dma_free(struct bfe_softc *sc);
static void bfe_dma_map(void *, bus_dma_segment_t *, int, int);
static void bfe_cam_write(struct bfe_softc *, u_char *, int);
static int sysctl_bfe_stats(SYSCTL_HANDLER_ARGS);

static device_method_t bfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, bfe_probe),
	DEVMETHOD(device_attach, bfe_attach),
	DEVMETHOD(device_detach, bfe_detach),
	DEVMETHOD(device_shutdown, bfe_shutdown),
	DEVMETHOD(device_suspend, bfe_suspend),
	DEVMETHOD(device_resume, bfe_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg, bfe_miibus_readreg),
	DEVMETHOD(miibus_writereg, bfe_miibus_writereg),
	DEVMETHOD(miibus_statchg, bfe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bfe_driver = {
	"bfe",
	bfe_methods,
	sizeof(struct bfe_softc)
};

DRIVER_MODULE(bfe, pci, bfe_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, bfe, bfe_devs,
    nitems(bfe_devs) - 1);
DRIVER_MODULE(miibus, bfe, miibus_driver, 0, 0);

/*
 * Probe for a Broadcom 4401 chip.
 */
static int
bfe_probe(device_t dev)
{
	struct bfe_type *t;

	t = bfe_devs;

	while (t->bfe_name != NULL) {
		if (pci_get_vendor(dev) == t->bfe_vid &&
		    pci_get_device(dev) == t->bfe_did) {
			device_set_desc(dev, t->bfe_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

struct bfe_dmamap_arg {
	bus_addr_t	bfe_busaddr;
};
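
/*
 * DMA resource layout (as set up below): one parent tag capped at
 * BFE_DMA_MAXADDR (the chip apparently cannot address past 1GB), one
 * tag/map per descriptor ring loaded as a single contiguous segment,
 * and per-mbuf tags/maps for the Tx and Rx buffers.  bfe_dma_map()
 * is the load callback that records each ring's bus address.
 */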
static int
bfe_dma_alloc(struct bfe_softc *sc)
{
	struct bfe_dmamap_arg ctx;
	struct bfe_rx_data *rd;
	struct bfe_tx_data *td;
	int error, i;

	/*
	 * parent tag. Apparently the chip cannot handle any DMA address
	 * greater than 1GB.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->bfe_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BFE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_parent_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    BFE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BFE_TX_LIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    BFE_TX_LIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_tx_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    BFE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BFE_RX_LIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    BFE_RX_LIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_rx_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * BFE_MAXTXSEGS,	/* maxsize */
	    BFE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create Tx buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->bfe_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot create Rx buffer DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load DMA map. */
	error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_tx_map);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.bfe_busaddr = 0;
	error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
	    sc->bfe_tx_list, BFE_TX_LIST_SIZE, bfe_dma_map, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.bfe_busaddr == 0) {
		device_printf(sc->bfe_dev,
		    "cannot load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->bfe_tx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);

	error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_rx_map);
	if (error != 0) {
		device_printf(sc->bfe_dev,
		    "cannot allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.bfe_busaddr = 0;
	error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
	    sc->bfe_rx_list, BFE_RX_LIST_SIZE, bfe_dma_map, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.bfe_busaddr == 0) {
		device_printf(sc->bfe_dev,
		    "cannot load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->bfe_rx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
		td = &sc->bfe_tx_ring[i];
		td->bfe_mbuf = NULL;
		td->bfe_map = NULL;
		error = bus_dmamap_create(sc->bfe_txmbuf_tag, 0, &td->bfe_map);
		if (error != 0) {
			device_printf(sc->bfe_dev,
			    "cannot create DMA map for Tx.\n");
			goto fail;
		}
	}

	/* Create spare DMA map for Rx buffers. */
	error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &sc->bfe_rx_sparemap);
	if (error != 0) {
		device_printf(sc->bfe_dev, "cannot create spare DMA map for Rx.\n");
		goto fail;
	}
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		rd = &sc->bfe_rx_ring[i];
		rd->bfe_mbuf = NULL;
		rd->bfe_map = NULL;
		rd->bfe_ctrl = 0;
		error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &rd->bfe_map);
		if (error != 0) {
			device_printf(sc->bfe_dev,
			    "cannot create DMA map for Rx.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
bfe_dma_free(struct bfe_softc *sc)
{
	struct bfe_tx_data *td;
	struct bfe_rx_data *rd;
	int i;

	/* Tx ring. */
	if (sc->bfe_tx_tag != NULL) {
		if (sc->bfe_tx_dma != 0)
			bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
		if (sc->bfe_tx_list != NULL)
			bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
			    sc->bfe_tx_map);
		sc->bfe_tx_dma = 0;
		sc->bfe_tx_list = NULL;
		bus_dma_tag_destroy(sc->bfe_tx_tag);
		sc->bfe_tx_tag = NULL;
	}

	/* Rx ring. */
	if (sc->bfe_rx_tag != NULL) {
		if (sc->bfe_rx_dma != 0)
			bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
		if (sc->bfe_rx_list != NULL)
			bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
			    sc->bfe_rx_map);
		sc->bfe_rx_dma = 0;
		sc->bfe_rx_list = NULL;
		bus_dma_tag_destroy(sc->bfe_rx_tag);
		sc->bfe_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->bfe_txmbuf_tag != NULL) {
		for (i = 0; i < BFE_TX_LIST_CNT; i++) {
			td = &sc->bfe_tx_ring[i];
			if (td->bfe_map != NULL) {
				bus_dmamap_destroy(sc->bfe_txmbuf_tag,
				    td->bfe_map);
				td->bfe_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->bfe_txmbuf_tag);
		sc->bfe_txmbuf_tag = NULL;
	}

	/* Rx buffers. */
	if (sc->bfe_rxmbuf_tag != NULL) {
		for (i = 0; i < BFE_RX_LIST_CNT; i++) {
			rd = &sc->bfe_rx_ring[i];
			if (rd->bfe_map != NULL) {
				bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
				    rd->bfe_map);
				rd->bfe_map = NULL;
			}
		}
		if (sc->bfe_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_sparemap);
			sc->bfe_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->bfe_rxmbuf_tag);
		sc->bfe_rxmbuf_tag = NULL;
	}

	if (sc->bfe_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->bfe_parent_tag);
		sc->bfe_parent_tag = NULL;
	}
}
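
/*
 * Attach sequence: init the softc mutex and tick callout, map the
 * BAR(0) registers, allocate the (shareable) IRQ, set up DMA, create
 * the ifnet, reset the chip, attach the PHY via mii_attach() and
 * finally ether_ifattach().  The interrupt handler is hooked last so
 * the rest of the setup does not need to take the softc lock.
 */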
static int
bfe_attach(device_t dev)
{
	if_t ifp = NULL;
	struct bfe_softc *sc;
	int error = 0, rid;

	sc = device_get_softc(dev);
	mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->bfe_stat_co, &sc->bfe_mtx, 0);

	sc->bfe_dev = dev;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bfe_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;

	sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->bfe_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if (bfe_dma_alloc(sc) != 0) {
		device_printf(dev, "failed to allocate DMA resources\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    sysctl_bfe_stats, "I", "Statistics");

	/* Set up ifnet structure */
	ifp = sc->bfe_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, bfe_ioctl);
	if_setstartfn(ifp, bfe_start);
	if_setinitfn(ifp, bfe_init);
	if_setsendqlen(ifp, BFE_TX_QLEN);
	if_setsendqready(ifp);

	bfe_get_config(sc);

	/* Reset the chip and turn on the PHY */
	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	BFE_UNLOCK(sc);

	error = mii_attach(dev, &sc->bfe_miibus, ifp, bfe_ifmedia_upd,
	    bfe_ifmedia_sts, BMSR_DEFCAPMASK, sc->bfe_phyaddr, MII_OFFSET_ANY,
	    0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->bfe_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);

	/*
	 * Hook interrupt last to avoid having to lock softc
	 */
	error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, bfe_intr, sc, &sc->bfe_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}
fail:
	if (error != 0)
		bfe_detach(dev);
	return (error);
}

static int
bfe_detach(device_t dev)
{
	struct bfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->bfe_ifp;

	if (device_is_attached(dev)) {
		BFE_LOCK(sc);
		sc->bfe_flags |= BFE_FLAG_DETACH;
		bfe_stop(sc);
		BFE_UNLOCK(sc);
		callout_drain(&sc->bfe_stat_co);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	BFE_UNLOCK(sc);

	bus_generic_detach(dev);

	bfe_release_resources(sc);
	bfe_dma_free(sc);
	mtx_destroy(&sc->bfe_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bfe_shutdown(device_t dev)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	BFE_LOCK(sc);
	bfe_stop(sc);

	BFE_UNLOCK(sc);

	return (0);
}

static int
bfe_suspend(device_t dev)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	BFE_LOCK(sc);
	bfe_stop(sc);
	BFE_UNLOCK(sc);

	return (0);
}

static int
bfe_resume(device_t dev)
{
	struct bfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->bfe_ifp;
	BFE_LOCK(sc);
	bfe_chip_reset(sc);
	if (if_getflags(ifp) & IFF_UP) {
		bfe_init_locked(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
		    !if_sendq_empty(ifp))
			bfe_start_locked(ifp);
	}
	BFE_UNLOCK(sc);

	return (0);
}

static int
bfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bfe_softc *sc;
	u_int32_t ret;

	sc = device_get_softc(dev);
	bfe_readphy(sc, reg, &ret);

	return (ret);
}

static int
bfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	bfe_writephy(sc, reg, val);

	return (0);
}

static void
bfe_miibus_statchg(device_t dev)
{
	struct bfe_softc *sc;
	struct mii_data *mii;
	u_int32_t val;
#ifdef notyet
	u_int32_t flow;
#endif

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bfe_miibus);

	sc->bfe_flags &= ~BFE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->bfe_flags |= BFE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* XXX Should stop Rx/Tx engine prior to touching MAC. */
	val = CSR_READ_4(sc, BFE_TX_CTRL);
	val &= ~BFE_TX_DUPLEX;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= BFE_TX_DUPLEX;
#ifdef notyet
		flow = CSR_READ_4(sc, BFE_RXCONF);
		flow &= ~BFE_RXCONF_FLOW;
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			flow |= BFE_RXCONF_FLOW;
		CSR_WRITE_4(sc, BFE_RXCONF, flow);
		/*
		 * It seems that the hardware has Tx pause issues
		 * so enable only Rx pause.
		 */
		flow = CSR_READ_4(sc, BFE_MAC_FLOW);
		flow &= ~BFE_FLOW_PAUSE_ENAB;
		CSR_WRITE_4(sc, BFE_MAC_FLOW, flow);
#endif
	}
	CSR_WRITE_4(sc, BFE_TX_CTRL, val);
}

static void
bfe_tx_ring_free(struct bfe_softc *sc)
{
	int i;

	for(i = 0; i < BFE_TX_LIST_CNT; i++) {
		if (sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
			bus_dmamap_sync(sc->bfe_txmbuf_tag,
			    sc->bfe_tx_ring[i].bfe_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bfe_txmbuf_tag,
			    sc->bfe_tx_ring[i].bfe_map);
			m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
			sc->bfe_tx_ring[i].bfe_mbuf = NULL;
		}
	}
	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
bfe_rx_ring_free(struct bfe_softc *sc)
{
	int i;

	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
			bus_dmamap_sync(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_ring[i].bfe_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bfe_rxmbuf_tag,
			    sc->bfe_rx_ring[i].bfe_map);
			m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
			sc->bfe_rx_ring[i].bfe_mbuf = NULL;
		}
	}
	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
bfe_list_rx_init(struct bfe_softc *sc)
{
	struct bfe_rx_data *rd;
	int i;

	sc->bfe_rx_prod = sc->bfe_rx_cons = 0;
	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		rd = &sc->bfe_rx_ring[i];
		rd->bfe_mbuf = NULL;
		rd->bfe_ctrl = 0;
		if (bfe_list_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc)));

	return (0);
}

static void
bfe_list_tx_init(struct bfe_softc *sc)
{
	int i;

	sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;
	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
	for (i = 0; i < BFE_TX_LIST_CNT; i++)
		sc->bfe_tx_ring[i].bfe_mbuf = NULL;

	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
bfe_discard_buf(struct bfe_softc *sc, int c)
{
	struct bfe_rx_data *r;
	struct bfe_desc *d;

	r = &sc->bfe_rx_ring[c];
	d = &sc->bfe_rx_list[c];
	d->bfe_ctrl = htole32(r->bfe_ctrl);
}
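
/*
 * Replenish the Rx slot at index 'c' with a fresh mbuf cluster.  The
 * new cluster is loaded into the spare DMA map first; only when that
 * succeeds is the old map unloaded and the spare swapped in, so an
 * allocation failure leaves the previous buffer in place.
 */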
static int
bfe_list_newbuf(struct bfe_softc *sc, int c)
{
	struct bfe_rxheader *rx_header;
	struct bfe_desc *d;
	struct bfe_rx_data *r;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	u_int32_t ctrl;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	r = &sc->bfe_rx_ring[c];
	if (r->bfe_mbuf != NULL) {
		bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bfe_rxmbuf_tag, r->bfe_map);
	}
	map = r->bfe_map;
	r->bfe_map = sc->bfe_rx_sparemap;
	sc->bfe_rx_sparemap = map;
	r->bfe_mbuf = m;

	rx_header = mtod(m, struct bfe_rxheader *);
	rx_header->len = 0;
	rx_header->flags = 0;
	bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map, BUS_DMASYNC_PREREAD);

	ctrl = segs[0].ds_len & BFE_DESC_LEN;
	KASSERT(ctrl > ETHER_MAX_LEN + 32, ("%s: buffer size too small(%d)!",
	    __func__, ctrl));
	if (c == BFE_RX_LIST_CNT - 1)
		ctrl |= BFE_DESC_EOT;
	r->bfe_ctrl = ctrl;

	d = &sc->bfe_rx_list[c];
	d->bfe_ctrl = htole32(ctrl);
	/* The chip needs all addresses to be added to BFE_PCI_DMA. */
	d->bfe_addr = htole32(BFE_ADDR_LO(segs[0].ds_addr) + BFE_PCI_DMA);

	return (0);
}
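
/*
 * The MAC address lives in the EEPROM as three 16-bit words starting
 * at byte 78, each stored byte-swapped, which is why the bytes are
 * picked out pairwise in reverse order below.
 */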
static void
bfe_get_config(struct bfe_softc *sc)
{
	u_int8_t eeprom[128];

	bfe_read_eeprom(sc, eeprom);

	sc->bfe_enaddr[0] = eeprom[79];
	sc->bfe_enaddr[1] = eeprom[78];
	sc->bfe_enaddr[2] = eeprom[81];
	sc->bfe_enaddr[3] = eeprom[80];
	sc->bfe_enaddr[4] = eeprom[83];
	sc->bfe_enaddr[5] = eeprom[82];

	sc->bfe_phyaddr = eeprom[90] & 0x1f;
	sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1;

	sc->bfe_core_unit = 0;
	sc->bfe_dma_offset = BFE_PCI_DMA;
}

static void
bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores)
{
	u_int32_t bar_orig, val;

	bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4);
	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4);

	val = CSR_READ_4(sc, BFE_SBINTVEC);
	val |= cores;
	CSR_WRITE_4(sc, BFE_SBINTVEC, val);

	val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2);
	val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
	CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val);

	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4);
}

static void
bfe_clear_stats(struct bfe_softc *sc)
{
	uint32_t reg;

	BFE_LOCK_ASSERT(sc);

	CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		CSR_READ_4(sc, reg);
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		CSR_READ_4(sc, reg);
}

static int
bfe_resetphy(struct bfe_softc *sc)
{
	u_int32_t val;

	bfe_writephy(sc, 0, BMCR_RESET);
	DELAY(100);
	bfe_readphy(sc, 0, &val);
	if (val & BMCR_RESET) {
		device_printf(sc->bfe_dev, "PHY Reset would not complete.\n");
		return (ENXIO);
	}
	return (0);
}

static void
bfe_chip_halt(struct bfe_softc *sc)
{
	BFE_LOCK_ASSERT(sc);
	/* disable interrupts - not that it actually does..*/
	CSR_WRITE_4(sc, BFE_IMASK, 0);
	CSR_READ_4(sc, BFE_IMASK);

	CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
	bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1);

	CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
	DELAY(10);
}
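
/*
 * Full chip reset: point the PCI interrupt vector at the enet core,
 * drain and disable the DMA engines if the core is already up, reset
 * the core, then reprogram MDIO, PHY selection, MAC control and the
 * DMA channel base addresses from scratch.
 */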
static void
bfe_chip_reset(struct bfe_softc *sc)
{
	u_int32_t val;

	BFE_LOCK_ASSERT(sc);

	/* Set the interrupt vector for the enet core */
	bfe_pci_setup(sc, BFE_INTVEC_ENET0);

	/* is core up? */
	val = CSR_READ_4(sc, BFE_SBTMSLOW) &
	    (BFE_RESET | BFE_REJECT | BFE_CLOCK);
	if (val == BFE_CLOCK) {
		/* It is, so shut it down */
		CSR_WRITE_4(sc, BFE_RCV_LAZY, 0);
		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
		bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1);
		CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
		if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK)
			bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE,
			    100, 0);
		CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
	}

	bfe_core_reset(sc);
	bfe_clear_stats(sc);

	/*
	 * We want the PHY registers to be accessible even when
	 * the driver is "downed" so initialize MDC preamble, frequency,
	 * and whether internal or external PHY here.
	 */

	/* 4402 has 62.5MHz SB clock and internal PHY */
	CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d);

	/* Internal or external PHY? */
	val = CSR_READ_4(sc, BFE_DEVCTRL);
	if (!(val & BFE_IPP))
		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL);
	else if (CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) {
		BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR);
		DELAY(100);
	}

	/* Enable CRC32 generation and set proper LED modes */
	BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);

	/* Reset or clear powerdown control bit */
	BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);

	CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
	    BFE_LAZY_FC_MASK));

	/*
	 * We don't want lazy interrupts, so just send them at
	 * the end of a frame, please
	 */
	BFE_OR(sc, BFE_RCV_LAZY, 0);

	/* Set max lengths, accounting for VLAN tags */
	CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN + 32);
	CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN + 32);

	/* Set watermark XXX - magic */
	CSR_WRITE_4(sc, BFE_TX_WMARK, 56);

	/*
	 * Initialise DMA channels
	 * - not forgetting dma addresses need to be added to BFE_PCI_DMA
	 */
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
	CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA);

	CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) |
	    BFE_RX_CTRL_ENABLE);
	CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA);

	bfe_resetphy(sc);
	bfe_setupphy(sc);
}

static void
bfe_core_disable(struct bfe_softc *sc)
{
	if ((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET)
		return;

	/*
	 * Set reject, wait for it set, then wait for the core to stop
	 * being busy, then set reset and reject and enable the clocks.
	 */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
	bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0);
	bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1);
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT |
	    BFE_RESET));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);
	/* Leave reset and reject set */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
	DELAY(10);
}

static void
bfe_core_reset(struct bfe_softc *sc)
{
	u_int32_t val;

	/* Disable the core */
	bfe_core_disable(sc);

	/* and bring it back up */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);

	/* Chip bug, clear SERR, IB and TO if they are set. */
	if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR)
		CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0);
	val = CSR_READ_4(sc, BFE_SBIMSTATE);
	if (val & (BFE_IBE | BFE_TO))
		CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));

	/* Clear reset and allow it to move through the core */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);

	/* Leave the clock set */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK);
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);
}
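
/*
 * Program one 6-byte MAC address into the CAM: bytes 2-5 go into the
 * low data word, BFE_CAM_HI_VALID plus bytes 0-1 into the high word,
 * then the write is kicked off for the given CAM index and we poll
 * for the busy bit to clear.
 */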
static void
bfe_cam_write(struct bfe_softc *sc, u_char *data, int index)
{
	u_int32_t val;

	val = ((u_int32_t) data[2]) << 24;
	val |= ((u_int32_t) data[3]) << 16;
	val |= ((u_int32_t) data[4]) << 8;
	val |= ((u_int32_t) data[5]);
	CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val);
	val = (BFE_CAM_HI_VALID |
	    (((u_int32_t) data[0]) << 8) |
	    (((u_int32_t) data[1])));
	CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val);
	CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE |
	    ((u_int32_t) index << BFE_CAM_INDEX_SHIFT)));
	bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1);
}

static u_int
bfe_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct bfe_softc *sc = arg;

	bfe_cam_write(sc, LLADDR(sdl), cnt + 1);

	return (1);
}
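
/*
 * Rebuild the Rx filter: program the promiscuous/broadcast bits, put
 * the interface's own address in CAM index 0, and (unless ALLMULTI)
 * add each multicast address at index cnt + 1 via the bfe_write_maddr()
 * callback before re-enabling the CAM.
 */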
static void
bfe_set_rx_mode(struct bfe_softc *sc)
{
	if_t ifp = sc->bfe_ifp;
	u_int32_t val;

	BFE_LOCK_ASSERT(sc);

	val = CSR_READ_4(sc, BFE_RXCONF);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= BFE_RXCONF_PROMISC;
	else
		val &= ~BFE_RXCONF_PROMISC;

	if (if_getflags(ifp) & IFF_BROADCAST)
		val &= ~BFE_RXCONF_DBCAST;
	else
		val |= BFE_RXCONF_DBCAST;

	CSR_WRITE_4(sc, BFE_CAM_CTRL, 0);
	bfe_cam_write(sc, if_getlladdr(sc->bfe_ifp), 0);

	if (if_getflags(ifp) & IFF_ALLMULTI)
		val |= BFE_RXCONF_ALLMULTI;
	else {
		val &= ~BFE_RXCONF_ALLMULTI;
		if_foreach_llmaddr(ifp, bfe_write_maddr, sc);
	}

	CSR_WRITE_4(sc, BFE_RXCONF, val);
	BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE);
}

static void
bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s : %d segments returned!", __func__, nseg));

	ctx = (struct bfe_dmamap_arg *)arg;
	ctx->bfe_busaddr = segs[0].ds_addr;
}

static void
bfe_release_resources(struct bfe_softc *sc)
{

	if (sc->bfe_intrhand != NULL)
		bus_teardown_intr(sc->bfe_dev, sc->bfe_irq, sc->bfe_intrhand);

	if (sc->bfe_irq != NULL)
		bus_release_resource(sc->bfe_dev, SYS_RES_IRQ, 0, sc->bfe_irq);

	if (sc->bfe_res != NULL)
		bus_release_resource(sc->bfe_dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bfe_res);

	if (sc->bfe_ifp != NULL)
		if_free(sc->bfe_ifp);
}

static void
bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data)
{
	long i;
	u_int16_t *ptr = (u_int16_t *)data;

	for(i = 0; i < 128; i += 2)
		ptr[i/2] = CSR_READ_4(sc, 4096 + i);
}
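
/*
 * Poll 'reg' until 'bit' is set (clear == 0) or cleared (clear == 1),
 * checking every 10us for at most 'timeout' iterations.  Returns 0 on
 * success and -1 on timeout, so callers can treat the result as a
 * boolean failure.
 */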
static int
bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit,
    u_long timeout, const int clear)
{
	u_long i;

	for (i = 0; i < timeout; i++) {
		u_int32_t val = CSR_READ_4(sc, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		DELAY(10);
	}
	if (i == timeout) {
		device_printf(sc->bfe_dev,
		    "BUG! Timeout waiting for bit %08x of register "
		    "%x to %s.\n", bit, reg, (clear ? "clear" : "set"));
		return (-1);
	}
	return (0);
}

static int
bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val)
{
	int err;

	/* Clear MII ISR */
	CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
	    (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
	err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
	*val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA;

	return (err);
}

static int
bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val)
{
	int status;

	CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
	    (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
	    (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
	    (reg << BFE_MDIO_RA_SHIFT) |
	    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
	    (val & BFE_MDIO_DATA_DATA)));
	status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);

	return (status);
}

/*
 * XXX - I think this is handled by the PHY driver, but it can't hurt to do it
 * twice
 */
static int
bfe_setupphy(struct bfe_softc *sc)
{
	u_int32_t val;

	/* Enable activity LED */
	bfe_readphy(sc, 26, &val);
	bfe_writephy(sc, 26, val & 0x7fff);
	bfe_readphy(sc, 26, &val);

	/* Enable traffic meter LED mode */
	bfe_readphy(sc, 27, &val);
	bfe_writephy(sc, 27, val | (1 << 6));

	return (0);
}
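
/*
 * The MIB counters are configured clear-on-read (BFE_MIB_CLR_ON_READ),
 * so each call snapshots the Tx and Rx register banks into mib[] and
 * accumulates the deltas into the software bfe_hw_stats as well as the
 * ifnet counters.
 */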
static void
bfe_stats_update(struct bfe_softc *sc)
{
	struct bfe_hw_stats *stats;
	if_t ifp;
	uint32_t mib[BFE_MIB_CNT];
	uint32_t reg, *val;

	BFE_LOCK_ASSERT(sc);

	val = mib;
	CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		*val++ = CSR_READ_4(sc, reg);
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		*val++ = CSR_READ_4(sc, reg);

	ifp = sc->bfe_ifp;
	stats = &sc->bfe_stats;
	/* Tx stat. */
	stats->tx_good_octets += mib[MIB_TX_GOOD_O];
	stats->tx_good_frames += mib[MIB_TX_GOOD_P];
	stats->tx_octets += mib[MIB_TX_O];
	stats->tx_frames += mib[MIB_TX_P];
	stats->tx_bcast_frames += mib[MIB_TX_BCAST];
	stats->tx_mcast_frames += mib[MIB_TX_MCAST];
	stats->tx_pkts_64 += mib[MIB_TX_64];
	stats->tx_pkts_65_127 += mib[MIB_TX_65_127];
	stats->tx_pkts_128_255 += mib[MIB_TX_128_255];
	stats->tx_pkts_256_511 += mib[MIB_TX_256_511];
	stats->tx_pkts_512_1023 += mib[MIB_TX_512_1023];
	stats->tx_pkts_1024_max += mib[MIB_TX_1024_MAX];
	stats->tx_jabbers += mib[MIB_TX_JABBER];
	stats->tx_oversize_frames += mib[MIB_TX_OSIZE];
	stats->tx_frag_frames += mib[MIB_TX_FRAG];
	stats->tx_underruns += mib[MIB_TX_URUNS];
	stats->tx_colls += mib[MIB_TX_TCOLS];
	stats->tx_single_colls += mib[MIB_TX_SCOLS];
	stats->tx_multi_colls += mib[MIB_TX_MCOLS];
	stats->tx_excess_colls += mib[MIB_TX_ECOLS];
	stats->tx_late_colls += mib[MIB_TX_LCOLS];
	stats->tx_deferrals += mib[MIB_TX_DEFERED];
	stats->tx_carrier_losts += mib[MIB_TX_CLOST];
	stats->tx_pause_frames += mib[MIB_TX_PAUSE];
	/* Rx stat. */
	stats->rx_good_octets += mib[MIB_RX_GOOD_O];
	stats->rx_good_frames += mib[MIB_RX_GOOD_P];
	stats->rx_octets += mib[MIB_RX_O];
	stats->rx_frames += mib[MIB_RX_P];
	stats->rx_bcast_frames += mib[MIB_RX_BCAST];
	stats->rx_mcast_frames += mib[MIB_RX_MCAST];
	stats->rx_pkts_64 += mib[MIB_RX_64];
	stats->rx_pkts_65_127 += mib[MIB_RX_65_127];
	stats->rx_pkts_128_255 += mib[MIB_RX_128_255];
	stats->rx_pkts_256_511 += mib[MIB_RX_256_511];
	stats->rx_pkts_512_1023 += mib[MIB_RX_512_1023];
	stats->rx_pkts_1024_max += mib[MIB_RX_1024_MAX];
	stats->rx_jabbers += mib[MIB_RX_JABBER];
	stats->rx_oversize_frames += mib[MIB_RX_OSIZE];
	stats->rx_frag_frames += mib[MIB_RX_FRAG];
	stats->rx_missed_frames += mib[MIB_RX_MISS];
	stats->rx_crc_align_errs += mib[MIB_RX_CRCA];
	stats->rx_runts += mib[MIB_RX_USIZE];
	stats->rx_crc_errs += mib[MIB_RX_CRC];
	stats->rx_align_errs += mib[MIB_RX_ALIGN];
	stats->rx_symbol_errs += mib[MIB_RX_SYM];
	stats->rx_pause_frames += mib[MIB_RX_PAUSE];
	stats->rx_control_frames += mib[MIB_RX_NPAUSE];

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, (u_long)mib[MIB_TX_GOOD_P]);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (u_long)mib[MIB_TX_TCOLS]);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, (u_long)mib[MIB_TX_URUNS] +
	    (u_long)mib[MIB_TX_ECOLS] +
	    (u_long)mib[MIB_TX_DEFERED] +
	    (u_long)mib[MIB_TX_CLOST]);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, (u_long)mib[MIB_RX_GOOD_P]);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, mib[MIB_RX_JABBER] +
	    mib[MIB_RX_MISS] +
	    mib[MIB_RX_CRCA] +
	    mib[MIB_RX_USIZE] +
	    mib[MIB_RX_CRC] +
	    mib[MIB_RX_ALIGN] +
	    mib[MIB_RX_SYM]);
}
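
/*
 * Reclaim completed Tx descriptors.  The chip's current descriptor
 * offset is read from BFE_DMATX_STAT and converted to a ring index;
 * everything from the software consumer up to that index is done, so
 * its mbufs can be unloaded and freed.
 */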
static void
bfe_txeof(struct bfe_softc *sc)
{
	struct bfe_tx_data *r;
	if_t ifp;
	int i, chipidx;

	BFE_LOCK_ASSERT(sc);

	ifp = sc->bfe_ifp;

	chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	chipidx /= sizeof(struct bfe_desc);

	i = sc->bfe_tx_cons;
	if (i == chipidx)
		return;
	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* Go through the mbufs and free those that have been transmitted */
	for (; i != chipidx; BFE_INC(i, BFE_TX_LIST_CNT)) {
		r = &sc->bfe_tx_ring[i];
		sc->bfe_tx_cnt--;
		if (r->bfe_mbuf == NULL)
			continue;
		bus_dmamap_sync(sc->bfe_txmbuf_tag, r->bfe_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);

		m_freem(r->bfe_mbuf);
		r->bfe_mbuf = NULL;
	}

	if (i != sc->bfe_tx_cons) {
		/* we freed up some mbufs */
		sc->bfe_tx_cons = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	if (sc->bfe_tx_cnt == 0)
		sc->bfe_watchdog_timer = 0;
}

/* Pass a received packet up the stack */
static void
bfe_rxeof(struct bfe_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	struct bfe_rxheader *rxheader;
	struct bfe_rx_data *r;
	int cons, prog;
	u_int32_t status, current, len, flags;

	BFE_LOCK_ASSERT(sc);
	cons = sc->bfe_rx_cons;
	status = CSR_READ_4(sc, BFE_DMARX_STAT);
	current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc);

	ifp = sc->bfe_ifp;

	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; current != cons; prog++,
	    BFE_INC(cons, BFE_RX_LIST_CNT)) {
		r = &sc->bfe_rx_ring[cons];
		m = r->bfe_mbuf;
		/*
		 * Rx status should be read from mbuf such that we can't
		 * delay bus_dmamap_sync(9).  This hardware limitation
		 * results in inefficient mbuf usage as bfe(4) couldn't
		 * reuse mapped buffer from errored frame.
		 */
		if (bfe_list_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			bfe_discard_buf(sc, cons);
			continue;
		}
		rxheader = mtod(m, struct bfe_rxheader*);
		len = le16toh(rxheader->len);
		flags = le16toh(rxheader->flags);

		/* Remove CRC bytes. */
		len -= ETHER_CRC_LEN;

		/* flag an error and try again */
		if ((len > ETHER_MAX_LEN + 32) || (flags & BFE_RX_FLAG_ERRORS)) {
			m_freem(m);
			continue;
		}

		/* Make sure to skip header bytes written by hardware. */
		m_adj(m, BFE_RX_OFFSET);
		m->m_len = m->m_pkthdr.len = len;

		m->m_pkthdr.rcvif = ifp;
		BFE_UNLOCK(sc);
		if_input(ifp, m);
		BFE_LOCK(sc);
	}

	if (prog > 0) {
		sc->bfe_rx_cons = cons;
		bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static void
bfe_intr(void *xsc)
{
	struct bfe_softc *sc = xsc;
	if_t ifp;
	u_int32_t istat;

	ifp = sc->bfe_ifp;

	BFE_LOCK(sc);

	istat = CSR_READ_4(sc, BFE_ISTAT);

	/*
	 * Defer unsolicited interrupts - This is necessary because setting
	 * the chip's interrupt mask register to 0 doesn't actually stop the
	 * interrupts.
	 */
	istat &= BFE_IMASK_DEF;
	CSR_WRITE_4(sc, BFE_ISTAT, istat);
	CSR_READ_4(sc, BFE_ISTAT);

	/* not expecting this interrupt, disregard it */
	if (istat == 0 || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		BFE_UNLOCK(sc);
		return;
	}

	/* A packet was received */
	if (istat & BFE_ISTAT_RX)
		bfe_rxeof(sc);

	/* A packet was sent */
	if (istat & BFE_ISTAT_TX)
		bfe_txeof(sc);

	if (istat & BFE_ISTAT_ERRORS) {
		if (istat & BFE_ISTAT_DSCE) {
			device_printf(sc->bfe_dev, "Descriptor Error\n");
			bfe_stop(sc);
			BFE_UNLOCK(sc);
			return;
		}

		if (istat & BFE_ISTAT_DPE) {
			device_printf(sc->bfe_dev,
			    "Descriptor Protocol Error\n");
			bfe_stop(sc);
			BFE_UNLOCK(sc);
			return;
		}
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		bfe_init_locked(sc);
	}

	/* We have packets pending, fire them out */
	if (!if_sendq_empty(ifp))
		bfe_start_locked(ifp);

	BFE_UNLOCK(sc);
}
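
/*
 * Map an outgoing mbuf chain onto Tx descriptors.  If the chain has
 * too many segments it is collapsed to fit BFE_MAXTXSEGS.  Note the
 * flag ordering: EOT marks the physical end of the ring, EOF the last
 * segment of this frame, and SOF is set on the first descriptor only
 * after the rest are filled in, so the chip never sees a half-built
 * frame.
 */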
static int
bfe_encap(struct bfe_softc *sc, struct mbuf **m_head)
{
	struct bfe_desc *d;
	struct bfe_tx_data *r, *r1;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[BFE_MAXTXSEGS];
	uint32_t cur, si;
	int error, i, nsegs;

	BFE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	si = cur = sc->bfe_tx_prod;
	r = &sc->bfe_tx_ring[cur];
	error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head,
	    txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, BFE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	if (sc->bfe_tx_cnt + nsegs > BFE_TX_LIST_CNT - 1) {
		bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);
		return (ENOBUFS);
	}

	for (i = 0; i < nsegs; i++) {
		d = &sc->bfe_tx_list[cur];
		d->bfe_ctrl = htole32(txsegs[i].ds_len & BFE_DESC_LEN);
		d->bfe_ctrl |= htole32(BFE_DESC_IOC);
		if (cur == BFE_TX_LIST_CNT - 1)
			/*
			 * Tell the chip to wrap to the start of
			 * the descriptor list.
			 */
			d->bfe_ctrl |= htole32(BFE_DESC_EOT);
		/* The chip needs all addresses to be added to BFE_PCI_DMA. */
		d->bfe_addr = htole32(BFE_ADDR_LO(txsegs[i].ds_addr) +
		    BFE_PCI_DMA);
		BFE_INC(cur, BFE_TX_LIST_CNT);
	}

	/* Update producer index. */
	sc->bfe_tx_prod = cur;

	/* Set EOF on the last descriptor. */
	cur = (cur + BFE_TX_LIST_CNT - 1) % BFE_TX_LIST_CNT;
	d = &sc->bfe_tx_list[cur];
	d->bfe_ctrl |= htole32(BFE_DESC_EOF);

	/* Lastly set SOF on the first descriptor to avoid races. */
	d = &sc->bfe_tx_list[si];
	d->bfe_ctrl |= htole32(BFE_DESC_SOF);

	r1 = &sc->bfe_tx_ring[cur];
	map = r->bfe_map;
	r->bfe_map = r1->bfe_map;
	r1->bfe_map = map;
	r1->bfe_mbuf = *m_head;
	sc->bfe_tx_cnt += nsegs;

	bus_dmamap_sync(sc->bfe_txmbuf_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Set up to transmit a packet.
 */
static void
bfe_start(if_t ifp)
{
	BFE_LOCK((struct bfe_softc *)if_getsoftc(ifp));
	bfe_start_locked(ifp);
	BFE_UNLOCK((struct bfe_softc *)if_getsoftc(ifp));
}

/*
 * Set up to transmit a packet.  The softc is already locked.
 */
static void
bfe_start_locked(if_t ifp)
{
	struct bfe_softc *sc;
	struct mbuf *m_head;
	int queued;

	sc = if_getsoftc(ifp);

	BFE_LOCK_ASSERT(sc);

	/*
	 * Not much point trying to send if the link is down
	 * or we have nothing to send.
	 */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->bfe_flags & BFE_FLAG_LINK) == 0)
		return;

	for (queued = 0; !if_sendq_empty(ifp) &&
	    sc->bfe_tx_cnt < BFE_TX_LIST_CNT - 1;) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the tx ring.  If we don't have
		 * enough room, let the chip drain the ring.
		 */
		if (bfe_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Transmit - twice due to apparent hardware bug */
		CSR_WRITE_4(sc, BFE_DMATX_PTR,
		    sc->bfe_tx_prod * sizeof(struct bfe_desc));
		/*
		 * XXX It seems the following write is not necessary
		 * to kick Tx command.  What might be required would be
		 * a way flushing PCI posted write.  Reading the register
		 * back ensures the flush operation.  In addition,
		 * hardware will execute PCI posted write in the long
		 * run and watchdog timer for the kick command was set
		 * to 5 seconds.  Therefore I think the second write
		 * access is not necessary or could be replaced with
		 * read operation.
		 */
		CSR_WRITE_4(sc, BFE_DMATX_PTR,
		    sc->bfe_tx_prod * sizeof(struct bfe_desc));

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->bfe_watchdog_timer = 5;
	}
}

static void
bfe_init(void *xsc)
{
	BFE_LOCK((struct bfe_softc *)xsc);
	bfe_init_locked(xsc);
	BFE_UNLOCK((struct bfe_softc *)xsc);
}

static void
bfe_init_locked(void *xsc)
{
	struct bfe_softc *sc = (struct bfe_softc*)xsc;
	if_t ifp = sc->bfe_ifp;
	struct mii_data *mii;

	BFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bfe_miibus);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	bfe_stop(sc);
	bfe_chip_reset(sc);

	if (bfe_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->bfe_dev,
		    "%s: Not enough memory for list buffers\n", __func__);
		bfe_stop(sc);
		return;
	}
	bfe_list_tx_init(sc);

	bfe_set_rx_mode(sc);

	/* Enable the chip and core */
	BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
	/* Enable interrupts */
	CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);

	/* Clear link state and change media. */
	sc->bfe_flags &= ~BFE_FLAG_LINK;
	mii_mediachg(mii);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}

/*
 * Set media options.
 */
static int
bfe_ifmedia_upd(if_t ifp)
{
	struct bfe_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	BFE_LOCK(sc);

	mii = device_get_softc(sc->bfe_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	BFE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
bfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct bfe_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	BFE_LOCK(sc);
	mii = device_get_softc(sc->bfe_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	BFE_UNLOCK(sc);
}

static int
bfe_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct bfe_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		BFE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				bfe_set_rx_mode(sc);
			else if ((sc->bfe_flags & BFE_FLAG_DETACH) == 0)
				bfe_init_locked(sc);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			bfe_stop(sc);
		BFE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		BFE_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			bfe_set_rx_mode(sc);
		BFE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->bfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
bfe_watchdog(struct bfe_softc *sc)
{
	if_t ifp;

	BFE_LOCK_ASSERT(sc);

	if (sc->bfe_watchdog_timer == 0 || --sc->bfe_watchdog_timer)
		return;

	ifp = sc->bfe_ifp;

	device_printf(sc->bfe_dev, "watchdog timeout -- resetting\n");

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	bfe_init_locked(sc);

	if (!if_sendq_empty(ifp))
		bfe_start_locked(ifp);
}

static void
bfe_tick(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct mii_data *mii;

	BFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bfe_miibus);
	mii_tick(mii);
	bfe_stats_update(sc);
	bfe_watchdog(sc);
	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bfe_stop(struct bfe_softc *sc)
{
	if_t ifp;

	BFE_LOCK_ASSERT(sc);

	ifp = sc->bfe_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->bfe_flags &= ~BFE_FLAG_LINK;
	callout_stop(&sc->bfe_stat_co);
	sc->bfe_watchdog_timer = 0;

	bfe_chip_halt(sc);
	bfe_tx_ring_free(sc);
	bfe_rx_ring_free(sc);
}
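
/*
 * Handler for the "stats" sysctl created in bfe_attach() under the
 * device's sysctl tree.  Writing 1 to the node dumps the accumulated
 * statistics to the console, e.g. (assuming unit 0):
 *
 *	sysctl dev.bfe.0.stats=1
 */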
static int
sysctl_bfe_stats(SYSCTL_HANDLER_ARGS)
{
	struct bfe_softc *sc;
	struct bfe_hw_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct bfe_softc *)arg1;
	stats = &sc->bfe_stats;

	printf("%s statistics:\n", device_get_nameunit(sc->bfe_dev));
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->tx_good_octets);
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->tx_good_frames);
	printf("Transmit octets : %ju\n",
	    (uintmax_t)stats->tx_octets);
	printf("Transmit frames : %ju\n",
	    (uintmax_t)stats->tx_frames);
	printf("Transmit broadcast frames : %ju\n",
	    (uintmax_t)stats->tx_bcast_frames);
	printf("Transmit multicast frames : %ju\n",
	    (uintmax_t)stats->tx_mcast_frames);
	printf("Transmit frames 64 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_64);
	printf("Transmit frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_65_127);
	printf("Transmit frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_128_255);
	printf("Transmit frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_256_511);
	printf("Transmit frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_512_1023);
	printf("Transmit frames 1024 to max bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1024_max);
	printf("Transmit jabber errors : %u\n", stats->tx_jabbers);
	printf("Transmit oversized frames : %ju\n",
	    (uintmax_t)stats->tx_oversize_frames);
	printf("Transmit fragmented frames : %ju\n",
	    (uintmax_t)stats->tx_frag_frames);
	printf("Transmit underruns : %u\n", stats->tx_underruns);
	printf("Transmit total collisions : %u\n", stats->tx_colls);
	printf("Transmit single collisions : %u\n", stats->tx_single_colls);
	printf("Transmit multiple collisions : %u\n", stats->tx_multi_colls);
	printf("Transmit excess collisions : %u\n", stats->tx_excess_colls);
	printf("Transmit late collisions : %u\n", stats->tx_late_colls);
	printf("Transmit deferrals : %u\n", stats->tx_deferrals);
	printf("Transmit carrier losts : %u\n", stats->tx_carrier_losts);
	printf("Transmit pause frames : %u\n", stats->tx_pause_frames);

	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->rx_good_octets);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_good_frames);
	printf("Receive octets : %ju\n",
	    (uintmax_t)stats->rx_octets);
	printf("Receive frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	printf("Receive broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	printf("Receive multicast frames : %ju\n",
	    (uintmax_t)stats->rx_mcast_frames);
	printf("Receive frames 64 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_64);
	printf("Receive frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_65_127);
	printf("Receive frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_128_255);
	printf("Receive frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_256_511);
	printf("Receive frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_512_1023);
	printf("Receive frames 1024 to max bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1024_max);
	printf("Receive jabber errors : %u\n", stats->rx_jabbers);
	printf("Receive oversized frames : %ju\n",
	    (uintmax_t)stats->rx_oversize_frames);
	printf("Receive fragmented frames : %ju\n",
	    (uintmax_t)stats->rx_frag_frames);
	printf("Receive missed frames : %u\n", stats->rx_missed_frames);
	printf("Receive CRC align errors : %u\n", stats->rx_crc_align_errs);
	printf("Receive undersized frames : %u\n", stats->rx_runts);
	printf("Receive CRC errors : %u\n", stats->rx_crc_errs);
	printf("Receive align errors : %u\n", stats->rx_align_errs);
	printf("Receive symbol errors : %u\n", stats->rx_symbol_errs);
	printf("Receive pause frames : %u\n", stats->rx_pause_frames);
	printf("Receive control frames : %u\n", stats->rx_control_frames);

	return (error);
}