GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/ae/if_ae.c
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2008 Stanislav Sedov <[email protected]>.
5
* All rights reserved.
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
*
16
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
*
27
* Driver for Attansic Technology Corp. L2 FastEthernet adapter.
28
*
29
* This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
30
*/
31
32
#include <sys/param.h>
33
#include <sys/systm.h>
34
#include <sys/bus.h>
35
#include <sys/endian.h>
36
#include <sys/kernel.h>
37
#include <sys/lock.h>
38
#include <sys/malloc.h>
39
#include <sys/mbuf.h>
40
#include <sys/mutex.h>
41
#include <sys/rman.h>
42
#include <sys/module.h>
43
#include <sys/queue.h>
44
#include <sys/socket.h>
45
#include <sys/sockio.h>
46
#include <sys/sysctl.h>
47
#include <sys/taskqueue.h>
48
49
#include <net/bpf.h>
50
#include <net/if.h>
51
#include <net/if_var.h>
52
#include <net/if_arp.h>
53
#include <net/ethernet.h>
54
#include <net/if_dl.h>
55
#include <net/if_media.h>
56
#include <net/if_types.h>
57
#include <net/if_vlan_var.h>
58
59
#include <netinet/in.h>
60
#include <netinet/in_systm.h>
61
#include <netinet/ip.h>
62
#include <netinet/tcp.h>
63
64
#include <dev/mii/mii.h>
65
#include <dev/mii/miivar.h>
66
#include <dev/pci/pcireg.h>
67
#include <dev/pci/pcivar.h>
68
69
#include <machine/bus.h>
70
71
#include "miibus_if.h"
72
73
#include "if_aereg.h"
74
#include "if_aevar.h"
75
76
/*
77
* Devices supported by this driver.
78
*/
79
static struct ae_dev {
80
uint16_t vendorid;
81
uint16_t deviceid;
82
const char *name;
83
} ae_devs[] = {
84
{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
85
"Attansic Technology Corp, L2 FastEthernet" },
86
};
87
#define AE_DEVS_COUNT nitems(ae_devs)
88
89
static struct resource_spec ae_res_spec_mem[] = {
90
{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
91
{ -1, 0, 0 }
92
};
93
static struct resource_spec ae_res_spec_irq[] = {
94
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
95
{ -1, 0, 0 }
96
};
97
static struct resource_spec ae_res_spec_msi[] = {
98
{ SYS_RES_IRQ, 1, RF_ACTIVE },
99
{ -1, 0, 0 }
100
};
101
102
static int ae_probe(device_t dev);
103
static int ae_attach(device_t dev);
104
static void ae_pcie_init(ae_softc_t *sc);
105
static void ae_phy_reset(ae_softc_t *sc);
106
static void ae_phy_init(ae_softc_t *sc);
107
static int ae_reset(ae_softc_t *sc);
108
static void ae_init(void *arg);
109
static int ae_init_locked(ae_softc_t *sc);
110
static int ae_detach(device_t dev);
111
static int ae_miibus_readreg(device_t dev, int phy, int reg);
112
static int ae_miibus_writereg(device_t dev, int phy, int reg, int val);
113
static void ae_miibus_statchg(device_t dev);
114
static void ae_mediastatus(if_t ifp, struct ifmediareq *ifmr);
115
static int ae_mediachange(if_t ifp);
116
static void ae_retrieve_address(ae_softc_t *sc);
117
static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
118
int error);
119
static int ae_alloc_rings(ae_softc_t *sc);
120
static void ae_dma_free(ae_softc_t *sc);
121
static int ae_shutdown(device_t dev);
122
static int ae_suspend(device_t dev);
123
static void ae_powersave_disable(ae_softc_t *sc);
124
static void ae_powersave_enable(ae_softc_t *sc);
125
static int ae_resume(device_t dev);
126
static unsigned int ae_tx_avail_size(ae_softc_t *sc);
127
static int ae_encap(ae_softc_t *sc, struct mbuf **m_head);
128
static void ae_start(if_t ifp);
129
static void ae_start_locked(if_t ifp);
130
static void ae_link_task(void *arg, int pending);
131
static void ae_stop_rxmac(ae_softc_t *sc);
132
static void ae_stop_txmac(ae_softc_t *sc);
133
static void ae_mac_config(ae_softc_t *sc);
134
static int ae_intr(void *arg);
135
static void ae_int_task(void *arg, int pending);
136
static void ae_tx_intr(ae_softc_t *sc);
137
static void ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
138
static void ae_rx_intr(ae_softc_t *sc);
139
static void ae_watchdog(ae_softc_t *sc);
140
static void ae_tick(void *arg);
141
static void ae_rxfilter(ae_softc_t *sc);
142
static void ae_rxvlan(ae_softc_t *sc);
143
static int ae_ioctl(if_t ifp, u_long cmd, caddr_t data);
144
static void ae_stop(ae_softc_t *sc);
145
static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
146
static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
147
static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
148
static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
149
static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
150
static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
151
static void ae_init_tunables(ae_softc_t *sc);
152
153
static device_method_t ae_methods[] = {
154
/* Device interface. */
155
DEVMETHOD(device_probe, ae_probe),
156
DEVMETHOD(device_attach, ae_attach),
157
DEVMETHOD(device_detach, ae_detach),
158
DEVMETHOD(device_shutdown, ae_shutdown),
159
DEVMETHOD(device_suspend, ae_suspend),
160
DEVMETHOD(device_resume, ae_resume),
161
162
/* MII interface. */
163
DEVMETHOD(miibus_readreg, ae_miibus_readreg),
164
DEVMETHOD(miibus_writereg, ae_miibus_writereg),
165
DEVMETHOD(miibus_statchg, ae_miibus_statchg),
166
{ NULL, NULL }
167
};
168
static driver_t ae_driver = {
169
"ae",
170
ae_methods,
171
sizeof(ae_softc_t)
172
};
173
174
DRIVER_MODULE(ae, pci, ae_driver, 0, 0);
175
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
176
nitems(ae_devs));
177
DRIVER_MODULE(miibus, ae, miibus_driver, 0, 0);
178
MODULE_DEPEND(ae, pci, 1, 1, 1);
179
MODULE_DEPEND(ae, ether, 1, 1, 1);
180
MODULE_DEPEND(ae, miibus, 1, 1, 1);
181
182
/*
183
* Tunables.
184
*/
185
static int msi_disable = 0;
186
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
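/*
* Usage example (illustrative): MSI can be disabled from loader.conf(5)
* by setting the tunable before the driver attaches:
*
*	hw.ae.msi_disable="1"
*
* With MSI disabled, ae_attach() falls back to a legacy shared INTx
* interrupt (ae_res_spec_irq above).
*/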
187
188
#define AE_READ_4(sc, reg) \
189
bus_read_4((sc)->mem[0], (reg))
190
#define AE_READ_2(sc, reg) \
191
bus_read_2((sc)->mem[0], (reg))
192
#define AE_READ_1(sc, reg) \
193
bus_read_1((sc)->mem[0], (reg))
194
#define AE_WRITE_4(sc, reg, val) \
195
bus_write_4((sc)->mem[0], (reg), (val))
196
#define AE_WRITE_2(sc, reg, val) \
197
bus_write_2((sc)->mem[0], (reg), (val))
198
#define AE_WRITE_1(sc, reg, val) \
199
bus_write_1((sc)->mem[0], (reg), (val))
200
#define AE_PHY_READ(sc, reg) \
201
ae_miibus_readreg(sc->dev, 0, reg)
202
#define AE_PHY_WRITE(sc, reg, val) \
203
ae_miibus_writereg(sc->dev, 0, reg, val)
204
#define AE_CHECK_EADDR_VALID(eaddr) \
205
((eaddr[0] == 0 && eaddr[1] == 0) || \
206
(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
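/*
* Note: despite its name, AE_CHECK_EADDR_VALID() evaluates to nonzero
* when the address is *invalid* (all-zeroes or all-ones); callers such
* as ae_get_vpd_eaddr() treat a nonzero result as an error.
*/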
207
#define AE_RXD_VLAN(vtag) \
208
(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
209
#define AE_TXD_VLAN(vtag) \
210
(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
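/*
* Worked example of the VLAN tag swizzle (illustrative; the hardware
* layout is inferred from the two macros, which are inverses of each
* other). The host TCI is PCP[15:13] | CFI[12] | VID[11:0]; the chip
* stores VID[15:4] | CFI[3] | PCP[2:0]. For PCP = 5, CFI = 0, VID = 10:
*
*	host TCI = (5 << 13) | 10	= 0xa00a
*	AE_TXD_VLAN(0xa00a) & 0xffff	= 0x00a5 (chip format)
*	AE_RXD_VLAN(0x00a5)		= 0xa00a (host TCI again)
*/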
211
212
static int
213
ae_probe(device_t dev)
214
{
215
uint16_t deviceid, vendorid;
216
int i;
217
218
vendorid = pci_get_vendor(dev);
219
deviceid = pci_get_device(dev);
220
221
/*
222
* Search through the list of supported devices for a matching one.
223
*/
224
for (i = 0; i < AE_DEVS_COUNT; i++) {
225
if (vendorid == ae_devs[i].vendorid &&
226
deviceid == ae_devs[i].deviceid) {
227
device_set_desc(dev, ae_devs[i].name);
228
return (BUS_PROBE_DEFAULT);
229
}
230
}
231
return (ENXIO);
232
}
233
234
static int
235
ae_attach(device_t dev)
236
{
237
ae_softc_t *sc;
238
if_t ifp;
239
uint8_t chiprev;
240
uint32_t pcirev;
241
int nmsi;
242
int error;
243
244
sc = device_get_softc(dev); /* Automatically allocated and zeroed
245
on attach. */
246
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
247
sc->dev = dev;
248
249
/*
250
* Initialize mutexes and tasks.
251
*/
252
mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
253
callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
254
TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
255
TASK_INIT(&sc->link_task, 0, ae_link_task, sc);
256
257
pci_enable_busmaster(dev); /* Enable bus mastering. */
258
259
sc->spec_mem = ae_res_spec_mem;
260
261
/*
262
* Allocate memory-mapped registers.
263
*/
264
error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
265
if (error != 0) {
266
device_printf(dev, "could not allocate memory resources.\n");
267
sc->spec_mem = NULL;
268
goto fail;
269
}
270
271
/*
272
* Retrieve PCI and chip revisions.
273
*/
274
pcirev = pci_get_revid(dev);
275
chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
276
AE_MASTER_REVNUM_MASK;
277
if (bootverbose) {
278
device_printf(dev, "pci device revision: %#04x\n", pcirev);
279
device_printf(dev, "chip id: %#02x\n", chiprev);
280
}
281
nmsi = pci_msi_count(dev);
282
if (bootverbose)
283
device_printf(dev, "MSI count: %d.\n", nmsi);
284
285
/*
286
* Allocate interrupt resources.
287
*/
288
if (msi_disable == 0 && nmsi == 1) {
289
error = pci_alloc_msi(dev, &nmsi);
290
if (error == 0) {
291
device_printf(dev, "Using MSI messages.\n");
292
sc->spec_irq = ae_res_spec_msi;
293
error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
294
if (error != 0) {
295
device_printf(dev, "MSI allocation failed.\n");
296
sc->spec_irq = NULL;
297
pci_release_msi(dev);
298
} else {
299
sc->flags |= AE_FLAG_MSI;
300
}
301
}
302
}
303
if (sc->spec_irq == NULL) {
304
sc->spec_irq = ae_res_spec_irq;
305
error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
306
if (error != 0) {
307
device_printf(dev, "could not allocate IRQ resources.\n");
308
sc->spec_irq = NULL;
309
goto fail;
310
}
311
}
312
313
ae_init_tunables(sc);
314
315
ae_phy_reset(sc); /* Reset PHY. */
316
error = ae_reset(sc); /* Reset the controller itself. */
317
if (error != 0)
318
goto fail;
319
320
ae_pcie_init(sc);
321
322
ae_retrieve_address(sc); /* Load MAC address. */
323
324
error = ae_alloc_rings(sc); /* Allocate ring buffers. */
325
if (error != 0)
326
goto fail;
327
328
ifp = sc->ifp = if_alloc(IFT_ETHER);
329
if_setsoftc(ifp, sc);
330
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
331
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
332
if_setioctlfn(ifp, ae_ioctl);
333
if_setstartfn(ifp, ae_start);
334
if_setinitfn(ifp, ae_init);
335
if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
336
if_sethwassist(ifp, 0);
337
if_setsendqlen(ifp, ifqmaxlen);
338
if_setsendqready(ifp);
339
if (pci_has_pm(dev)) {
340
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
341
sc->flags |= AE_FLAG_PMG;
342
}
343
if_setcapenable(ifp, if_getcapabilities(ifp));
344
345
/*
346
* Configure and attach MII bus.
347
*/
348
error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
349
ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
350
MII_OFFSET_ANY, 0);
351
if (error != 0) {
352
device_printf(dev, "attaching PHYs failed\n");
353
goto fail;
354
}
355
356
ether_ifattach(ifp, sc->eaddr);
357
/* Tell the upper layer(s) we support long frames. */
358
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
359
360
/*
361
* Create and run all helper tasks.
362
*/
363
sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
364
taskqueue_thread_enqueue, &sc->tq);
365
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
366
device_get_nameunit(sc->dev));
367
368
/*
369
* Configure interrupt handlers.
370
*/
371
error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
372
ae_intr, NULL, sc, &sc->intrhand);
373
if (error != 0) {
374
device_printf(dev, "could not set up interrupt handler.\n");
375
taskqueue_free(sc->tq);
376
sc->tq = NULL;
377
ether_ifdetach(ifp);
378
goto fail;
379
}
380
381
fail:
382
if (error != 0)
383
ae_detach(dev);
384
385
return (error);
386
}
387
388
#define AE_SYSCTL(stx, parent, name, desc, ptr) \
389
SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
390
391
static void
392
ae_init_tunables(ae_softc_t *sc)
393
{
394
struct sysctl_ctx_list *ctx;
395
struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
396
struct ae_stats *ae_stats;
397
398
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
399
ae_stats = &sc->stats;
400
401
ctx = device_get_sysctl_ctx(sc->dev);
402
root = device_get_sysctl_tree(sc->dev);
403
stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
404
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");
405
406
/*
407
* Receiver statistics.
408
*/
409
stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
410
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
411
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
412
"broadcast frames", &ae_stats->rx_bcast);
413
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
414
"multicast frames", &ae_stats->rx_mcast);
415
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
416
"PAUSE frames", &ae_stats->rx_pause);
417
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
418
"control frames", &ae_stats->rx_ctrl);
419
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
420
"frames with CRC errors", &ae_stats->rx_crcerr);
421
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
422
"frames with invalid opcode", &ae_stats->rx_codeerr);
423
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
424
"runt frames", &ae_stats->rx_runt);
425
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
426
"fragmented frames", &ae_stats->rx_frag);
427
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
428
"frames with alignment errors", &ae_stats->rx_align);
429
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
430
"frames truncated due to Rx FIFO inderrun", &ae_stats->rx_trunc);
431
432
/*
433
* Transmitter statistics.
434
*/
435
stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
436
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
437
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
438
"broadcast frames", &ae_stats->tx_bcast);
439
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
440
"multicast frames", &ae_stats->tx_mcast);
441
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
442
"PAUSE frames", &ae_stats->tx_pause);
443
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
444
"control frames", &ae_stats->tx_ctrl);
445
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
446
"deferrals occuried", &ae_stats->tx_defer);
447
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
448
"excessive deferrals occuried", &ae_stats->tx_excdefer);
449
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
450
"single collisions occuried", &ae_stats->tx_singlecol);
451
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
452
"multiple collisions occuried", &ae_stats->tx_multicol);
453
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
454
"late collisions occuried", &ae_stats->tx_latecol);
455
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
456
"transmit aborts due collisions", &ae_stats->tx_abortcol);
457
AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
458
"Tx FIFO underruns", &ae_stats->tx_underrun);
459
}
460
461
static void
462
ae_pcie_init(ae_softc_t *sc)
463
{
464
465
AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
466
AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
467
}
468
469
static void
470
ae_phy_reset(ae_softc_t *sc)
471
{
472
473
AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
474
DELAY(1000); /* XXX: pause(9) ? */
475
}
476
477
static int
478
ae_reset(ae_softc_t *sc)
479
{
480
int i;
481
482
/*
483
* Issue a soft reset.
484
*/
485
AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
486
bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
487
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
488
489
/*
490
* Wait for reset to complete.
491
*/
492
for (i = 0; i < AE_RESET_TIMEOUT; i++) {
493
if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
494
break;
495
DELAY(10);
496
}
497
if (i == AE_RESET_TIMEOUT) {
498
device_printf(sc->dev, "reset timeout.\n");
499
return (ENXIO);
500
}
501
502
/*
503
* Wait for everything to enter idle state.
504
*/
505
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
506
if (AE_READ_4(sc, AE_IDLE_REG) == 0)
507
break;
508
DELAY(100);
509
}
510
if (i == AE_IDLE_TIMEOUT) {
511
device_printf(sc->dev, "could not enter idle state.\n");
512
return (ENXIO);
513
}
514
return (0);
515
}
516
517
static void
518
ae_init(void *arg)
519
{
520
ae_softc_t *sc;
521
522
sc = (ae_softc_t *)arg;
523
AE_LOCK(sc);
524
ae_init_locked(sc);
525
AE_UNLOCK(sc);
526
}
527
528
static void
529
ae_phy_init(ae_softc_t *sc)
530
{
531
532
/*
533
* Enable link status change interrupt.
534
* XXX magic numbers.
535
*/
536
#ifdef notyet
537
AE_PHY_WRITE(sc, 18, 0xc00);
538
#endif
539
}
540
541
static int
542
ae_init_locked(ae_softc_t *sc)
543
{
544
if_t ifp;
545
struct mii_data *mii;
546
uint8_t eaddr[ETHER_ADDR_LEN];
547
uint32_t val;
548
bus_addr_t addr;
549
550
AE_LOCK_ASSERT(sc);
551
552
ifp = sc->ifp;
553
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
554
return (0);
555
mii = device_get_softc(sc->miibus);
556
557
ae_stop(sc);
558
ae_reset(sc);
559
ae_pcie_init(sc); /* Initialize PCIE stuff. */
560
ae_phy_init(sc);
561
ae_powersave_disable(sc);
562
563
/*
564
* Clear and disable interrupts.
565
*/
566
AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
567
568
/*
569
* Set the MAC address.
570
*/
571
bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
572
val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
573
AE_WRITE_4(sc, AE_EADDR0_REG, val);
574
val = eaddr[0] << 8 | eaddr[1];
575
AE_WRITE_4(sc, AE_EADDR1_REG, val);
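/*
* Worked example: for the MAC address 00:1f:c6:11:22:33 the two writes
* above program AE_EADDR0_REG = 0xc6112233 (bytes 2-5) and
* AE_EADDR1_REG = 0x001f (bytes 0-1).
*/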
576
577
bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
578
bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
579
bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
580
/*
581
* Set ring buffer base addresses.
582
*/
583
addr = sc->dma_rxd_busaddr;
584
AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
585
AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
586
addr = sc->dma_txd_busaddr;
587
AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
588
addr = sc->dma_txs_busaddr;
589
AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
590
591
/*
592
* Configure ring buffer sizes.
593
*/
594
AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
595
AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
596
AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
597
598
/*
599
* Configure interframe gap parameters.
600
*/
601
val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
602
AE_IFG_TXIPG_MASK) |
603
((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
604
AE_IFG_RXIPG_MASK) |
605
((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
606
AE_IFG_IPGR1_MASK) |
607
((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
608
AE_IFG_IPGR2_MASK);
609
AE_WRITE_4(sc, AE_IFG_REG, val);
610
611
/*
612
* Configure half-duplex operation.
613
*/
614
val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
615
AE_HDPX_LCOL_MASK) |
616
((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
617
AE_HDPX_RETRY_MASK) |
618
((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
619
AE_HDPX_ABEBT_MASK) |
620
((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
621
AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
622
AE_WRITE_4(sc, AE_HDPX_REG, val);
623
624
/*
625
* Configure the interrupt moderation timer.
626
*/
627
AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
628
val = AE_READ_4(sc, AE_MASTER_REG);
629
val |= AE_MASTER_IMT_EN;
630
AE_WRITE_4(sc, AE_MASTER_REG, val);
631
632
/*
633
* Configure interrupt clearing timer.
634
*/
635
AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
636
637
/*
638
* Configure MTU.
639
*/
640
val = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
641
ETHER_CRC_LEN;
642
AE_WRITE_2(sc, AE_MTU_REG, val);
643
644
/*
645
* Configure cut-through threshold.
646
*/
647
AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
648
649
/*
650
* Configure flow control.
651
*/
652
AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
653
AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
654
(AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
655
(AE_RXD_COUNT_DEFAULT / 12));
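/*
* The ternary above is simply
* max(AE_RXD_COUNT_MIN / 8, AE_RXD_COUNT_DEFAULT / 12); the high
* threshold is 7/8 of the default Rx descriptor count.
*/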
656
657
/*
658
* Init mailboxes.
659
*/
660
sc->txd_cur = sc->rxd_cur = 0;
661
sc->txs_ack = sc->txd_ack = 0;
662
sc->rxd_cur = 0;
663
AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
664
AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
665
666
sc->tx_inproc = 0; /* Number of packets the chip is processing now. */
667
sc->flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */
668
669
/*
670
* Enable DMA.
671
*/
672
AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
673
AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
674
675
/*
676
* Check if everything is OK.
677
*/
678
val = AE_READ_4(sc, AE_ISR_REG);
679
if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
680
device_printf(sc->dev, "Initialization failed.\n");
681
return (ENXIO);
682
}
683
684
/*
685
* Clear interrupt status.
686
*/
687
AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
688
AE_WRITE_4(sc, AE_ISR_REG, 0x0);
689
690
/*
691
* Enable interrupts.
692
*/
693
val = AE_READ_4(sc, AE_MASTER_REG);
694
AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
695
AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
696
697
/*
698
* Disable WOL.
699
*/
700
AE_WRITE_4(sc, AE_WOL_REG, 0);
701
702
/*
703
* Configure MAC.
704
*/
705
val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
706
AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
707
AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
708
((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
709
((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
710
AE_MAC_PREAMBLE_MASK);
711
AE_WRITE_4(sc, AE_MAC_REG, val);
712
713
/*
714
* Configure Rx MAC.
715
*/
716
ae_rxfilter(sc);
717
ae_rxvlan(sc);
718
719
/*
720
* Enable Tx/Rx.
721
*/
722
val = AE_READ_4(sc, AE_MAC_REG);
723
AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
724
725
sc->flags &= ~AE_FLAG_LINK;
726
mii_mediachg(mii); /* Switch to the current media. */
727
728
callout_reset(&sc->tick_ch, hz, ae_tick, sc);
729
730
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
731
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
732
733
#ifdef AE_DEBUG
734
device_printf(sc->dev, "Initialization complete.\n");
735
#endif
736
737
return (0);
738
}
739
740
static int
741
ae_detach(device_t dev)
742
{
743
struct ae_softc *sc;
744
if_t ifp;
745
746
sc = device_get_softc(dev);
747
KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
748
ifp = sc->ifp;
749
if (device_is_attached(dev)) {
750
AE_LOCK(sc);
751
sc->flags |= AE_FLAG_DETACH;
752
ae_stop(sc);
753
AE_UNLOCK(sc);
754
callout_drain(&sc->tick_ch);
755
taskqueue_drain(sc->tq, &sc->int_task);
756
taskqueue_drain(taskqueue_swi, &sc->link_task);
757
ether_ifdetach(ifp);
758
}
759
if (sc->tq != NULL) {
760
taskqueue_drain(sc->tq, &sc->int_task);
761
taskqueue_free(sc->tq);
762
sc->tq = NULL;
763
}
764
bus_generic_detach(sc->dev);
765
ae_dma_free(sc);
766
if (sc->intrhand != NULL) {
767
bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
768
sc->intrhand = NULL;
769
}
770
if (ifp != NULL) {
771
if_free(ifp);
772
sc->ifp = NULL;
773
}
774
if (sc->spec_irq != NULL)
775
bus_release_resources(dev, sc->spec_irq, sc->irq);
776
if (sc->spec_mem != NULL)
777
bus_release_resources(dev, sc->spec_mem, sc->mem);
778
if ((sc->flags & AE_FLAG_MSI) != 0)
779
pci_release_msi(dev);
780
mtx_destroy(&sc->mtx);
781
782
return (0);
783
}
784
785
static int
786
ae_miibus_readreg(device_t dev, int phy, int reg)
787
{
788
ae_softc_t *sc;
789
uint32_t val;
790
int i;
791
792
sc = device_get_softc(dev);
793
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
794
795
/*
796
* Locking is done in upper layers.
797
*/
798
799
val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
800
AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
801
((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
802
AE_WRITE_4(sc, AE_MDIO_REG, val);
803
804
/*
805
* Wait for operation to complete.
806
*/
807
for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
808
DELAY(2);
809
val = AE_READ_4(sc, AE_MDIO_REG);
810
if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
811
break;
812
}
813
if (i == AE_MDIO_TIMEOUT) {
814
device_printf(sc->dev, "phy read timeout: %d.\n", reg);
815
return (0);
816
}
817
return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
818
}
819
820
static int
821
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
822
{
823
ae_softc_t *sc;
824
uint32_t aereg;
825
int i;
826
827
sc = device_get_softc(dev);
828
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
829
830
/*
831
* Locking is done in upper layers.
832
*/
833
834
aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
835
AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
836
((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
837
((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
838
AE_WRITE_4(sc, AE_MDIO_REG, aereg);
839
840
/*
841
* Wait for operation to complete.
842
*/
843
for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
844
DELAY(2);
845
aereg = AE_READ_4(sc, AE_MDIO_REG);
846
if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
847
break;
848
}
849
if (i == AE_MDIO_TIMEOUT) {
850
device_printf(sc->dev, "phy write timeout: %d.\n", reg);
851
}
852
return (0);
853
}
854
855
static void
856
ae_miibus_statchg(device_t dev)
857
{
858
ae_softc_t *sc;
859
860
sc = device_get_softc(dev);
861
taskqueue_enqueue(taskqueue_swi, &sc->link_task);
862
}
863
864
static void
865
ae_mediastatus(if_t ifp, struct ifmediareq *ifmr)
866
{
867
ae_softc_t *sc;
868
struct mii_data *mii;
869
870
sc = if_getsoftc(ifp);
871
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
872
873
AE_LOCK(sc);
874
mii = device_get_softc(sc->miibus);
875
mii_pollstat(mii);
876
ifmr->ifm_status = mii->mii_media_status;
877
ifmr->ifm_active = mii->mii_media_active;
878
AE_UNLOCK(sc);
879
}
880
881
static int
882
ae_mediachange(if_t ifp)
883
{
884
ae_softc_t *sc;
885
struct mii_data *mii;
886
struct mii_softc *mii_sc;
887
int error;
888
889
/* XXX: check IFF_UP ?? */
890
sc = if_getsoftc(ifp);
891
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
892
AE_LOCK(sc);
893
mii = device_get_softc(sc->miibus);
894
LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
895
PHY_RESET(mii_sc);
896
error = mii_mediachg(mii);
897
AE_UNLOCK(sc);
898
899
return (error);
900
}
901
902
static int
903
ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
904
{
905
int error;
906
uint32_t val;
907
908
KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
909
910
/*
911
* Not sure why, but Linux does this.
912
*/
913
val = AE_READ_4(sc, AE_SPICTL_REG);
914
if ((val & AE_SPICTL_VPD_EN) != 0) {
915
val &= ~AE_SPICTL_VPD_EN;
916
AE_WRITE_4(sc, AE_SPICTL_REG, val);
917
}
918
error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
919
return (error);
920
}
921
922
static int
923
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
924
{
925
uint32_t val;
926
int i;
927
928
AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
929
930
/*
931
* VPD registers start at offset 0x100. Read them.
932
*/
933
val = 0x100 + reg * 4;
934
AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
935
AE_VPD_CAP_ADDR_MASK);
936
for (i = 0; i < AE_VPD_TIMEOUT; i++) {
937
DELAY(2000);
938
val = AE_READ_4(sc, AE_VPD_CAP_REG);
939
if ((val & AE_VPD_CAP_DONE) != 0)
940
break;
941
}
942
if (i == AE_VPD_TIMEOUT) {
943
device_printf(sc->dev, "timeout reading VPD register %d.\n",
944
reg);
945
return (ETIMEDOUT);
946
}
947
*word = AE_READ_4(sc, AE_VPD_DATA_REG);
948
return (0);
949
}
950
951
static int
952
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
953
{
954
uint32_t word, reg, val;
955
int error;
956
int found;
957
int vpdc;
958
int i;
959
960
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
961
KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));
962
963
/*
964
* Check for EEPROM.
965
*/
966
error = ae_check_eeprom_present(sc, &vpdc);
967
if (error != 0)
968
return (error);
969
970
/*
971
* Read the VPD configuration space.
972
* Each register is prefixed with a signature,
973
* so we can check if it is valid.
974
*/
975
for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
976
error = ae_vpd_read_word(sc, i, &word);
977
if (error != 0)
978
break;
979
980
/*
981
* Check signature.
982
*/
983
if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
984
break;
985
reg = word >> AE_VPD_REG_SHIFT;
986
i++; /* Move to the next word. */
987
988
if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
989
continue;
990
991
error = ae_vpd_read_word(sc, i, &val);
992
if (error != 0)
993
break;
994
if (reg == AE_EADDR0_REG)
995
eaddr[0] = val;
996
else
997
eaddr[1] = val;
998
found++;
999
}
1000
1001
if (found < 2)
1002
return (ENOENT);
1003
1004
eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1005
if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1006
if (bootverbose)
1007
device_printf(sc->dev,
1008
"VPD ethernet address registers are invalid.\n");
1009
return (EINVAL);
1010
}
1011
return (0);
1012
}
1013
1014
static int
1015
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
1016
{
1017
1018
/*
1019
* BIOS is supposed to set this.
1020
*/
1021
eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1022
eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1023
eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1024
1025
if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1026
if (bootverbose)
1027
device_printf(sc->dev,
1028
"Ethernet address registers are invalid.\n");
1029
return (EINVAL);
1030
}
1031
return (0);
1032
}
1033
1034
static void
1035
ae_retrieve_address(ae_softc_t *sc)
1036
{
1037
uint32_t eaddr[2] = {0, 0};
1038
int error;
1039
1040
/*
1041
* Check for EEPROM.
1042
*/
1043
error = ae_get_vpd_eaddr(sc, eaddr);
1044
if (error != 0)
1045
error = ae_get_reg_eaddr(sc, eaddr);
1046
if (error != 0) {
1047
if (bootverbose)
1048
device_printf(sc->dev,
1049
"Generating random ethernet address.\n");
1050
eaddr[0] = arc4random();
1051
1052
/*
1053
* Set OUI to ASUSTek COMPUTER INC.
1054
*/
1055
sc->eaddr[0] = 0x02; /* U/L bit set. */
1056
sc->eaddr[1] = 0x1f;
1057
sc->eaddr[2] = 0xc6;
1058
sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1059
sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1060
sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1061
} else {
1062
sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
1063
sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
1064
sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
1065
sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1066
sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1067
sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1068
}
1069
}
1070
1071
static void
1072
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1073
{
1074
bus_addr_t *addr = arg;
1075
1076
if (error != 0)
1077
return;
1078
KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
1079
nsegs));
1080
*addr = segs[0].ds_addr;
1081
}
1082
1083
static int
1084
ae_alloc_rings(ae_softc_t *sc)
1085
{
1086
bus_addr_t busaddr;
1087
int error;
1088
1089
/*
1090
* Create parent DMA tag.
1091
*/
1092
error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1093
1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1094
NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
1095
BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
1096
&sc->dma_parent_tag);
1097
if (error != 0) {
1098
device_printf(sc->dev, "could not creare parent DMA tag.\n");
1099
return (error);
1100
}
1101
1102
/*
1103
* Create DMA tag for TxD.
1104
*/
1105
error = bus_dma_tag_create(sc->dma_parent_tag,
1106
8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1107
NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
1108
AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
1109
&sc->dma_txd_tag);
1110
if (error != 0) {
1111
device_printf(sc->dev, "could not creare TxD DMA tag.\n");
1112
return (error);
1113
}
1114
1115
/*
1116
* Create DMA tag for TxS.
1117
*/
1118
error = bus_dma_tag_create(sc->dma_parent_tag,
1119
8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1120
NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
1121
AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
1122
&sc->dma_txs_tag);
1123
if (error != 0) {
1124
device_printf(sc->dev, "could not creare TxS DMA tag.\n");
1125
return (error);
1126
}
1127
1128
/*
1129
* Create DMA tag for RxD.
1130
*/
1131
error = bus_dma_tag_create(sc->dma_parent_tag,
1132
128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1133
NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
1134
AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
1135
&sc->dma_rxd_tag);
1136
if (error != 0) {
1137
device_printf(sc->dev, "could not creare TxS DMA tag.\n");
1138
return (error);
1139
}
1140
1141
/*
1142
* Allocate TxD DMA memory.
1143
*/
1144
error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
1145
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1146
&sc->dma_txd_map);
1147
if (error != 0) {
1148
device_printf(sc->dev,
1149
"could not allocate DMA memory for TxD ring.\n");
1150
return (error);
1151
}
1152
error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
1153
AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1154
if (error != 0 || busaddr == 0) {
1155
device_printf(sc->dev,
1156
"could not load DMA map for TxD ring.\n");
1157
return (error);
1158
}
1159
sc->dma_txd_busaddr = busaddr;
1160
1161
/*
1162
* Allocate TxS DMA memory.
1163
*/
1164
error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
1165
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1166
&sc->dma_txs_map);
1167
if (error != 0) {
1168
device_printf(sc->dev,
1169
"could not allocate DMA memory for TxS ring.\n");
1170
return (error);
1171
}
1172
error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
1173
AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1174
if (error != 0 || busaddr == 0) {
1175
device_printf(sc->dev,
1176
"could not load DMA map for TxS ring.\n");
1177
return (error);
1178
}
1179
sc->dma_txs_busaddr = busaddr;
1180
1181
/*
1182
* Allocate RxD DMA memory.
1183
*/
1184
error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
1185
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1186
&sc->dma_rxd_map);
1187
if (error != 0) {
1188
device_printf(sc->dev,
1189
"could not allocate DMA memory for RxD ring.\n");
1190
return (error);
1191
}
1192
error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
1193
sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
1194
ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1195
if (error != 0 || busaddr == 0) {
1196
device_printf(sc->dev,
1197
"could not load DMA map for RxD ring.\n");
1198
return (error);
1199
}
1200
sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
1201
sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
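/*
* Both the CPU pointer and the bus address skip AE_RXD_PADDING bytes, so
* the host and the chip agree on where the first Rx descriptor lives.
*/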
1202
1203
return (0);
1204
}
1205
1206
static void
1207
ae_dma_free(ae_softc_t *sc)
1208
{
1209
1210
if (sc->dma_txd_tag != NULL) {
1211
if (sc->dma_txd_busaddr != 0)
1212
bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1213
if (sc->txd_base != NULL)
1214
bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1215
sc->dma_txd_map);
1216
bus_dma_tag_destroy(sc->dma_txd_tag);
1217
sc->dma_txd_tag = NULL;
1218
sc->txd_base = NULL;
1219
sc->dma_txd_busaddr = 0;
1220
}
1221
if (sc->dma_txs_tag != NULL) {
1222
if (sc->dma_txs_busaddr != 0)
1223
bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1224
if (sc->txs_base != NULL)
1225
bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1226
sc->dma_txs_map);
1227
bus_dma_tag_destroy(sc->dma_txs_tag);
1228
sc->dma_txs_tag = NULL;
1229
sc->txs_base = NULL;
1230
sc->dma_txs_busaddr = 0;
1231
}
1232
if (sc->dma_rxd_tag != NULL) {
1233
if (sc->dma_rxd_busaddr != 0)
1234
bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1235
if (sc->rxd_base_dma != NULL)
1236
bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
1237
sc->dma_rxd_map);
1238
bus_dma_tag_destroy(sc->dma_rxd_tag);
1239
sc->dma_rxd_tag = NULL;
1240
sc->rxd_base_dma = NULL;
1241
sc->dma_rxd_busaddr = 0;
1242
}
1243
if (sc->dma_parent_tag != NULL) {
1244
bus_dma_tag_destroy(sc->dma_parent_tag);
1245
sc->dma_parent_tag = NULL;
1246
}
1247
}
1248
1249
static int
1250
ae_shutdown(device_t dev)
1251
{
1252
ae_softc_t *sc;
1253
int error;
1254
1255
sc = device_get_softc(dev);
1256
KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
1257
1258
error = ae_suspend(dev);
1259
AE_LOCK(sc);
1260
ae_powersave_enable(sc);
1261
AE_UNLOCK(sc);
1262
return (error);
1263
}
1264
1265
static void
1266
ae_powersave_disable(ae_softc_t *sc)
1267
{
1268
uint32_t val;
1269
1270
AE_LOCK_ASSERT(sc);
1271
1272
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1273
val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1274
if (val & AE_PHY_DBG_POWERSAVE) {
1275
val &= ~AE_PHY_DBG_POWERSAVE;
1276
AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1277
DELAY(1000);
1278
}
1279
}
1280
1281
static void
1282
ae_powersave_enable(ae_softc_t *sc)
1283
{
1284
uint32_t val;
1285
1286
AE_LOCK_ASSERT(sc);
1287
1288
/*
1289
* XXX magic numbers.
1290
*/
1291
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1292
val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1293
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1294
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1295
AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1296
AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1297
AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
1298
}
1299
1300
static void
1301
ae_pm_init(ae_softc_t *sc)
1302
{
1303
if_t ifp;
1304
uint32_t val;
1305
struct mii_data *mii;
1306
1307
AE_LOCK_ASSERT(sc);
1308
1309
ifp = sc->ifp;
1310
if ((sc->flags & AE_FLAG_PMG) == 0) {
1311
/* Disable WOL entirely. */
1312
AE_WRITE_4(sc, AE_WOL_REG, 0);
1313
return;
1314
}
1315
1316
/*
1317
* Configure WOL if enabled.
1318
*/
1319
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
1320
mii = device_get_softc(sc->miibus);
1321
mii_pollstat(mii);
1322
if ((mii->mii_media_status & IFM_AVALID) != 0 &&
1323
(mii->mii_media_status & IFM_ACTIVE) != 0) {
1324
AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
1325
AE_WOL_MAGIC_PME);
1326
1327
/*
1328
* Configure MAC.
1329
*/
1330
val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
1331
AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
1332
((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
1333
AE_HALFBUF_MASK) | \
1334
((AE_MAC_PREAMBLE_DEFAULT << \
1335
AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
1336
AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
1337
if ((IFM_OPTIONS(mii->mii_media_active) & \
1338
IFM_FDX) != 0)
1339
val |= AE_MAC_FULL_DUPLEX;
1340
AE_WRITE_4(sc, AE_MAC_REG, val);
1341
1342
} else { /* No link. */
1343
AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
1344
AE_WOL_LNKCHG_PME);
1345
AE_WRITE_4(sc, AE_MAC_REG, 0);
1346
}
1347
} else {
1348
ae_powersave_enable(sc);
1349
}
1350
1351
/*
1352
* PCIE hacks. Magic numbers.
1353
*/
1354
val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
1355
val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
1356
AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
1357
val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
1358
val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
1359
AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);
1360
1361
/*
1362
* Configure PME.
1363
*/
1364
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
1365
pci_enable_pme(sc->dev);
1366
}
1367
1368
static int
1369
ae_suspend(device_t dev)
1370
{
1371
ae_softc_t *sc;
1372
1373
sc = device_get_softc(dev);
1374
1375
AE_LOCK(sc);
1376
ae_stop(sc);
1377
ae_pm_init(sc);
1378
AE_UNLOCK(sc);
1379
1380
return (0);
1381
}
1382
1383
static int
1384
ae_resume(device_t dev)
1385
{
1386
ae_softc_t *sc;
1387
1388
sc = device_get_softc(dev);
1389
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1390
1391
AE_LOCK(sc);
1392
AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
1393
if ((if_getflags(sc->ifp) & IFF_UP) != 0)
1394
ae_init_locked(sc);
1395
AE_UNLOCK(sc);
1396
1397
return (0);
1398
}
1399
1400
static unsigned int
1401
ae_tx_avail_size(ae_softc_t *sc)
1402
{
1403
unsigned int avail;
1404
1405
if (sc->txd_cur >= sc->txd_ack)
1406
avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1407
else
1408
avail = sc->txd_ack - sc->txd_cur;
1409
1410
return (avail);
1411
}
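/*
* Worked example (illustrative numbers only; AE_TXD_BUFSIZE_DEFAULT is
* defined in the driver headers): with an 8192-byte ring, txd_cur = 5000
* and txd_ack = 2000 leave 8192 - (5000 - 2000) = 5192 bytes free; after
* a wrap, txd_cur = 2000 and txd_ack = 5000 leave 5000 - 2000 = 3000
* bytes free.
*/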
1412
1413
static int
1414
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
1415
{
1416
struct mbuf *m0;
1417
ae_txd_t *hdr;
1418
unsigned int to_end;
1419
uint16_t len;
1420
1421
AE_LOCK_ASSERT(sc);
1422
1423
m0 = *m_head;
1424
len = m0->m_pkthdr.len;
1425
1426
if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
1427
len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
1428
#ifdef AE_DEBUG
1429
if_printf(sc->ifp, "No free Tx available.\n");
1430
#endif
1431
return (ENOBUFS);
1432
}
1433
1434
hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
1435
bzero(hdr, sizeof(*hdr));
1436
/* Skip header size. */
1437
sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
1438
/* Space available to the end of the ring. */
1439
to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
1440
if (to_end >= len) {
1441
m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
1442
} else {
1443
m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
1444
sc->txd_cur));
1445
m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
1446
}
1447
1448
/*
1449
* Set TxD flags and parameters.
1450
*/
1451
if ((m0->m_flags & M_VLANTAG) != 0) {
1452
hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
1453
hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
1454
} else {
1455
hdr->len = htole16(len);
1456
}
1457
1458
/*
1459
* Set current TxD position and round up to a 4-byte boundary.
1460
*/
1461
sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
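/*
* Example: with a 4-byte-aligned txd_cur, a 61-byte frame advances the
* cursor by (61 + 3) & ~3 = 64 bytes, so the next TxD header always
* starts on a 4-byte boundary.
*/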
1462
if (sc->txd_cur == sc->txd_ack)
1463
sc->flags &= ~AE_FLAG_TXAVAIL;
1464
#ifdef AE_DEBUG
1465
if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
1466
#endif
1467
1468
/*
1469
* Update TxS position and check if there are empty TxS available.
1470
*/
1471
sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
1472
sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
1473
if (sc->txs_cur == sc->txs_ack)
1474
sc->flags &= ~AE_FLAG_TXAVAIL;
1475
1476
/*
1477
* Synchronize DMA memory.
1478
*/
1479
bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
1480
BUS_DMASYNC_PREWRITE);
1481
bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1482
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1483
1484
return (0);
1485
}
1486
1487
static void
1488
ae_start(if_t ifp)
1489
{
1490
ae_softc_t *sc;
1491
1492
sc = if_getsoftc(ifp);
1493
AE_LOCK(sc);
1494
ae_start_locked(ifp);
1495
AE_UNLOCK(sc);
1496
}
1497
1498
static void
1499
ae_start_locked(if_t ifp)
1500
{
1501
ae_softc_t *sc;
1502
unsigned int count;
1503
struct mbuf *m0;
1504
int error;
1505
1506
sc = if_getsoftc(ifp);
1507
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1508
AE_LOCK_ASSERT(sc);
1509
1510
#ifdef AE_DEBUG
1511
if_printf(ifp, "Start called.\n");
1512
#endif
1513
1514
if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1515
IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
1516
return;
1517
1518
count = 0;
1519
while (!if_sendq_empty(ifp)) {
1520
m0 = if_dequeue(ifp);
1521
if (m0 == NULL)
1522
break; /* Nothing to do. */
1523
1524
error = ae_encap(sc, &m0);
1525
if (error != 0) {
1526
if (m0 != NULL) {
1527
if_sendq_prepend(ifp, m0);
1528
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1529
#ifdef AE_DEBUG
1530
if_printf(ifp, "Setting OACTIVE.\n");
1531
#endif
1532
}
1533
break;
1534
}
1535
count++;
1536
sc->tx_inproc++;
1537
1538
/* Bounce a copy of the frame to BPF. */
1539
ETHER_BPF_MTAP(ifp, m0);
1540
1541
m_freem(m0);
1542
}
1543
1544
if (count > 0) { /* Something was dequeued. */
1545
AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1546
sc->wd_timer = AE_TX_TIMEOUT; /* Load watchdog. */
1547
#ifdef AE_DEBUG
1548
if_printf(ifp, "%d packets dequeued.\n", count);
1549
if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1550
#endif
1551
}
1552
}
1553
1554
static void
1555
ae_link_task(void *arg, int pending)
1556
{
1557
ae_softc_t *sc;
1558
struct mii_data *mii;
1559
if_t ifp;
1560
uint32_t val;
1561
1562
sc = (ae_softc_t *)arg;
1563
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1564
AE_LOCK(sc);
1565
1566
ifp = sc->ifp;
1567
mii = device_get_softc(sc->miibus);
1568
if (mii == NULL || ifp == NULL ||
1569
(if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1570
AE_UNLOCK(sc); /* XXX: could happen? */
1571
return;
1572
}
1573
1574
sc->flags &= ~AE_FLAG_LINK;
1575
if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
1576
(IFM_AVALID | IFM_ACTIVE)) {
1577
switch(IFM_SUBTYPE(mii->mii_media_active)) {
1578
case IFM_10_T:
1579
case IFM_100_TX:
1580
sc->flags |= AE_FLAG_LINK;
1581
break;
1582
default:
1583
break;
1584
}
1585
}
1586
1587
/*
1588
* Stop Rx/Tx MACs.
1589
*/
1590
ae_stop_rxmac(sc);
1591
ae_stop_txmac(sc);
1592
1593
if ((sc->flags & AE_FLAG_LINK) != 0) {
1594
ae_mac_config(sc);
1595
1596
/*
1597
* Restart DMA engines.
1598
*/
1599
AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
1600
AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
1601
1602
/*
1603
* Enable Rx and Tx MACs.
1604
*/
1605
val = AE_READ_4(sc, AE_MAC_REG);
1606
val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
1607
AE_WRITE_4(sc, AE_MAC_REG, val);
1608
}
1609
AE_UNLOCK(sc);
1610
}
1611
1612
static void
1613
ae_stop_rxmac(ae_softc_t *sc)
1614
{
1615
uint32_t val;
1616
int i;
1617
1618
AE_LOCK_ASSERT(sc);
1619
1620
/*
1621
* Stop Rx MAC engine.
1622
*/
1623
val = AE_READ_4(sc, AE_MAC_REG);
1624
if ((val & AE_MAC_RX_EN) != 0) {
1625
val &= ~AE_MAC_RX_EN;
1626
AE_WRITE_4(sc, AE_MAC_REG, val);
1627
}
1628
1629
/*
1630
* Stop Rx DMA engine.
1631
*/
1632
if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
1633
AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
1634
1635
/*
1636
* Wait for IDLE state.
1637
*/
1638
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1639
val = AE_READ_4(sc, AE_IDLE_REG);
1640
if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
1641
break;
1642
DELAY(100);
1643
}
1644
if (i == AE_IDLE_TIMEOUT)
1645
device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
1646
}
1647
1648
static void
1649
ae_stop_txmac(ae_softc_t *sc)
1650
{
1651
uint32_t val;
1652
int i;
1653
1654
AE_LOCK_ASSERT(sc);
1655
1656
/*
1657
* Stop Tx MAC engine.
1658
*/
1659
val = AE_READ_4(sc, AE_MAC_REG);
1660
if ((val & AE_MAC_TX_EN) != 0) {
1661
val &= ~AE_MAC_TX_EN;
1662
AE_WRITE_4(sc, AE_MAC_REG, val);
1663
}
1664
1665
/*
1666
* Stop Tx DMA engine.
1667
*/
1668
if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
1669
AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
1670
1671
/*
1672
* Wait for IDLE state.
1673
*/
1674
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1675
val = AE_READ_4(sc, AE_IDLE_REG);
1676
if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
1677
break;
1678
DELAY(100);
1679
}
1680
if (i == AE_IDLE_TIMEOUT)
1681
device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
1682
}
1683
1684
static void
1685
ae_mac_config(ae_softc_t *sc)
1686
{
1687
struct mii_data *mii;
1688
uint32_t val;
1689
1690
AE_LOCK_ASSERT(sc);
1691
1692
mii = device_get_softc(sc->miibus);
1693
val = AE_READ_4(sc, AE_MAC_REG);
1694
val &= ~AE_MAC_FULL_DUPLEX;
1695
/* XXX disable AE_MAC_TX_FLOW_EN? */
1696
1697
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1698
val |= AE_MAC_FULL_DUPLEX;
1699
1700
AE_WRITE_4(sc, AE_MAC_REG, val);
1701
}
1702
1703
static int
1704
ae_intr(void *arg)
1705
{
1706
ae_softc_t *sc;
1707
uint32_t val;
1708
1709
sc = (ae_softc_t *)arg;
1710
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1711
1712
val = AE_READ_4(sc, AE_ISR_REG);
1713
if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
1714
return (FILTER_STRAY);
1715
1716
/* Disable interrupts. */
1717
AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
1718
1719
/* Schedule interrupt processing. */
1720
taskqueue_enqueue(sc->tq, &sc->int_task);
1721
1722
return (FILTER_HANDLED);
1723
}
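/*
* ae_intr() runs as an interrupt filter: it only checks and masks the
* interrupt, then defers the real work to ae_int_task() on the driver
* taskqueue, where it is safe to take the softc mutex.
*/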
1724
1725
static void
1726
ae_int_task(void *arg, int pending)
1727
{
1728
ae_softc_t *sc;
1729
if_t ifp;
1730
uint32_t val;
1731
1732
sc = (ae_softc_t *)arg;
1733
1734
AE_LOCK(sc);
1735
1736
ifp = sc->ifp;
1737
1738
val = AE_READ_4(sc, AE_ISR_REG); /* Read interrupt status. */
1739
if (val == 0) {
1740
AE_UNLOCK(sc);
1741
return;
1742
}
1743
1744
/*
1745
* Clear interrupts and disable them.
1746
*/
1747
AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
1748
1749
#ifdef AE_DEBUG
1750
if_printf(ifp, "Interrupt received: 0x%08x\n", val);
1751
#endif
1752
1753
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1754
if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
1755
AE_ISR_PHY_LINKDOWN)) != 0) {
1756
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1757
ae_init_locked(sc);
1758
AE_UNLOCK(sc);
1759
return;
1760
}
1761
if ((val & AE_ISR_TX_EVENT) != 0)
1762
ae_tx_intr(sc);
1763
if ((val & AE_ISR_RX_EVENT) != 0)
1764
ae_rx_intr(sc);
1765
/*
1766
* Re-enable interrupts.
1767
*/
1768
AE_WRITE_4(sc, AE_ISR_REG, 0);
1769
1770
if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
1771
if (!if_sendq_empty(ifp))
1772
ae_start_locked(ifp);
1773
}
1774
}
1775
1776
AE_UNLOCK(sc);
1777
}
1778
1779
static void
1780
ae_tx_intr(ae_softc_t *sc)
1781
{
1782
if_t ifp;
1783
ae_txd_t *txd;
1784
ae_txs_t *txs;
1785
uint16_t flags;
1786
1787
AE_LOCK_ASSERT(sc);
1788
1789
ifp = sc->ifp;
1790
1791
#ifdef AE_DEBUG
1792
if_printf(ifp, "Tx interrupt occuried.\n");
1793
#endif
1794
1795
/*
1796
* Synchronize DMA buffers.
1797
*/
1798
bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1799
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1800
bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1801
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1802
1803
for (;;) {
1804
txs = sc->txs_base + sc->txs_ack;
1805
flags = le16toh(txs->flags);
1806
if ((flags & AE_TXS_UPDATE) == 0)
1807
break;
1808
txs->flags = htole16(flags & ~AE_TXS_UPDATE);
1809
/* Update stats. */
1810
ae_update_stats_tx(flags, &sc->stats);
1811
1812
/*
1813
* Update TxS position.
1814
*/
1815
sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
1816
sc->flags |= AE_FLAG_TXAVAIL;
1817
1818
txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
1819
if (txs->len != txd->len)
1820
device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
1821
le16toh(txs->len), le16toh(txd->len));
1822
1823
/*
1824
* Move txd ack and align on 4-byte boundary.
1825
*/
1826
sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
1827
sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1828
1829
if ((flags & AE_TXS_SUCCESS) != 0)
1830
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1831
else
1832
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1833
1834
sc->tx_inproc--;
1835
}
1836
1837
if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
1838
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1839
if (sc->tx_inproc < 0) {
1840
if_printf(ifp, "Received stray Tx interrupt(s).\n");
1841
sc->tx_inproc = 0;
1842
}
1843
1844
if (sc->tx_inproc == 0)
1845
sc->wd_timer = 0; /* Unarm watchdog. */
1846
1847
/*
1848
* Synchronize DMA buffers.
1849
*/
1850
bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1851
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1852
bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1853
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1854
}
1855
1856
static void
1857
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
1858
{
1859
if_t ifp;
1860
struct mbuf *m;
1861
unsigned int size;
1862
uint16_t flags;
1863
1864
AE_LOCK_ASSERT(sc);
1865
1866
ifp = sc->ifp;
1867
flags = le16toh(rxd->flags);
1868
1869
#ifdef AE_DEBUG
1870
if_printf(ifp, "Rx interrupt occuried.\n");
1871
#endif
1872
size = le16toh(rxd->len) - ETHER_CRC_LEN;
1873
if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
1874
if_printf(ifp, "Runt frame received.");
1875
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1876
return;
1877
}
1878
1879
m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
1880
if (m == NULL) {
1881
if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1882
return;
1883
}
1884
1885
if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
1886
(flags & AE_RXD_HAS_VLAN) != 0) {
1887
m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
1888
m->m_flags |= M_VLANTAG;
1889
}
1890
1891
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1892
/*
1893
* Pass it through.
1894
*/
1895
AE_UNLOCK(sc);
1896
if_input(ifp, m);
1897
AE_LOCK(sc);
1898
}
1899
1900
static void
1901
ae_rx_intr(ae_softc_t *sc)
1902
{
1903
ae_rxd_t *rxd;
1904
if_t ifp;
1905
uint16_t flags;
1906
int count;
1907
1908
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1909
1910
AE_LOCK_ASSERT(sc);
1911
1912
ifp = sc->ifp;
1913
1914
/*
1915
* Synchronize DMA buffers.
1916
*/
1917
bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1918
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1919
1920
for (count = 0;; count++) {
1921
rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
1922
flags = le16toh(rxd->flags);
1923
if ((flags & AE_RXD_UPDATE) == 0)
1924
break;
1925
rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
1926
/* Update stats. */
1927
ae_update_stats_rx(flags, &sc->stats);
1928
1929
/*
1930
* Update position index.
1931
*/
1932
sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
1933
1934
if ((flags & AE_RXD_SUCCESS) != 0)
1935
ae_rxeof(sc, rxd);
1936
else
1937
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1938
}
1939
1940
if (count > 0) {
1941
bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1942
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1943
/*
1944
* Update Rx index.
1945
*/
1946
AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
1947
}
1948
}
1949
1950
static void
1951
ae_watchdog(ae_softc_t *sc)
1952
{
1953
if_t ifp;
1954
1955
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1956
AE_LOCK_ASSERT(sc);
1957
ifp = sc->ifp;
1958
1959
if (sc->wd_timer == 0 || --sc->wd_timer != 0)
1960
return; /* Nothing to do. */
1961
1962
if ((sc->flags & AE_FLAG_LINK) == 0)
1963
if_printf(ifp, "watchdog timeout (missed link).\n");
1964
else
1965
if_printf(ifp, "watchdog timeout - resetting.\n");
1966
1967
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1968
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1969
ae_init_locked(sc);
1970
if (!if_sendq_empty(ifp))
1971
ae_start_locked(ifp);
1972
}
1973
1974
static void
1975
ae_tick(void *arg)
1976
{
1977
ae_softc_t *sc;
1978
struct mii_data *mii;
1979
1980
sc = (ae_softc_t *)arg;
1981
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1982
AE_LOCK_ASSERT(sc);
1983
1984
mii = device_get_softc(sc->miibus);
1985
mii_tick(mii);
1986
ae_watchdog(sc); /* Watchdog check. */
1987
callout_reset(&sc->tick_ch, hz, ae_tick, sc);
1988
}
1989
1990
static void
1991
ae_rxvlan(ae_softc_t *sc)
1992
{
1993
if_t ifp;
1994
uint32_t val;
1995
1996
AE_LOCK_ASSERT(sc);
1997
ifp = sc->ifp;
1998
val = AE_READ_4(sc, AE_MAC_REG);
1999
val &= ~AE_MAC_RMVLAN_EN;
2000
if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2001
val |= AE_MAC_RMVLAN_EN;
2002
AE_WRITE_4(sc, AE_MAC_REG, val);
2003
}
2004
2005
static u_int
2006
ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2007
{
2008
uint32_t crc, *mchash = arg;
2009
2010
crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
2011
mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2012
2013
return (1);
2014
}
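/*
* Worked example: the big-endian CRC32 of the MAC address selects one of
* 64 hash bits: crc >> 31 (the top bit) picks mchash[0] or mchash[1],
* and (crc >> 26) & 0x1f (the next five bits) picks the bit within that
* word. E.g. crc = 0x84000000 sets bit 1 of mchash[1].
*/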
2015
2016
static void
2017
ae_rxfilter(ae_softc_t *sc)
2018
{
2019
if_t ifp;
2020
uint32_t mchash[2];
2021
uint32_t rxcfg;
2022
2023
KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2024
2025
AE_LOCK_ASSERT(sc);
2026
2027
ifp = sc->ifp;
2028
2029
rxcfg = AE_READ_4(sc, AE_MAC_REG);
2030
rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
2031
2032
if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
2033
rxcfg |= AE_MAC_BCAST_EN;
2034
if ((if_getflags(ifp) & IFF_PROMISC) != 0)
2035
rxcfg |= AE_MAC_PROMISC_EN;
2036
if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
2037
rxcfg |= AE_MAC_MCAST_EN;
2038
2039
/*
2040
* Wipe old settings.
2041
*/
2042
AE_WRITE_4(sc, AE_REG_MHT0, 0);
2043
AE_WRITE_4(sc, AE_REG_MHT1, 0);
2044
if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2045
AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
2046
AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
2047
AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2048
return;
2049
}
2050
2051
/*
2052
* Load multicast tables.
2053
*/
2054
bzero(mchash, sizeof(mchash));
2055
if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
2056
AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
2057
AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
2058
AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2059
}
2060
2061
static int
2062
ae_ioctl(if_t ifp, u_long cmd, caddr_t data)
2063
{
2064
struct ae_softc *sc;
2065
struct ifreq *ifr;
2066
struct mii_data *mii;
2067
int error, mask;
2068
2069
sc = if_getsoftc(ifp);
2070
ifr = (struct ifreq *)data;
2071
error = 0;
2072
2073
switch (cmd) {
2074
case SIOCSIFMTU:
2075
if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2076
error = EINVAL;
2077
else if (if_getmtu(ifp) != ifr->ifr_mtu) {
2078
AE_LOCK(sc);
2079
if_setmtu(ifp, ifr->ifr_mtu);
2080
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2081
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2082
ae_init_locked(sc);
2083
}
2084
AE_UNLOCK(sc);
2085
}
2086
break;
2087
case SIOCSIFFLAGS:
2088
AE_LOCK(sc);
2089
if ((if_getflags(ifp) & IFF_UP) != 0) {
2090
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2091
if (((if_getflags(ifp) ^ sc->if_flags)
2092
& (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2093
ae_rxfilter(sc);
2094
} else {
2095
if ((sc->flags & AE_FLAG_DETACH) == 0)
2096
ae_init_locked(sc);
2097
}
2098
} else {
2099
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2100
ae_stop(sc);
2101
}
2102
sc->if_flags = if_getflags(ifp);
2103
AE_UNLOCK(sc);
2104
break;
2105
case SIOCADDMULTI:
2106
case SIOCDELMULTI:
2107
AE_LOCK(sc);
2108
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2109
ae_rxfilter(sc);
2110
AE_UNLOCK(sc);
2111
break;
2112
case SIOCSIFMEDIA:
2113
case SIOCGIFMEDIA:
2114
mii = device_get_softc(sc->miibus);
2115
error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2116
break;
2117
case SIOCSIFCAP:
2118
AE_LOCK(sc);
2119
mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2120
if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2121
(if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2122
if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2123
ae_rxvlan(sc);
2124
}
2125
VLAN_CAPABILITIES(ifp);
2126
AE_UNLOCK(sc);
2127
break;
2128
default:
2129
error = ether_ioctl(ifp, cmd, data);
2130
break;
2131
}
2132
return (error);
2133
}
2134
2135
static void
2136
ae_stop(ae_softc_t *sc)
2137
{
2138
if_t ifp;
2139
int i;
2140
2141
AE_LOCK_ASSERT(sc);
2142
2143
ifp = sc->ifp;
2144
if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2145
sc->flags &= ~AE_FLAG_LINK;
2146
sc->wd_timer = 0; /* Cancel watchdog. */
2147
callout_stop(&sc->tick_ch);
2148
2149
/*
2150
* Clear and disable interrupts.
2151
*/
2152
AE_WRITE_4(sc, AE_IMR_REG, 0);
2153
AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
2154
2155
/*
2156
* Stop Rx/Tx MACs.
2157
*/
2158
ae_stop_txmac(sc);
2159
ae_stop_rxmac(sc);
2160
2161
/*
2162
* Stop DMA engines.
2163
*/
2164
AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
2165
AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
2166
2167
/*
2168
* Wait for everything to enter idle state.
2169
*/
2170
for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
2171
if (AE_READ_4(sc, AE_IDLE_REG) == 0)
2172
break;
2173
DELAY(100);
2174
}
2175
if (i == AE_IDLE_TIMEOUT)
2176
device_printf(sc->dev, "could not enter idle state in stop.\n");
2177
}
2178
2179
static void
2180
ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
2181
{
2182
2183
if ((flags & AE_TXS_BCAST) != 0)
2184
stats->tx_bcast++;
2185
if ((flags & AE_TXS_MCAST) != 0)
2186
stats->tx_mcast++;
2187
if ((flags & AE_TXS_PAUSE) != 0)
2188
stats->tx_pause++;
2189
if ((flags & AE_TXS_CTRL) != 0)
2190
stats->tx_ctrl++;
2191
if ((flags & AE_TXS_DEFER) != 0)
2192
stats->tx_defer++;
2193
if ((flags & AE_TXS_EXCDEFER) != 0)
2194
stats->tx_excdefer++;
2195
if ((flags & AE_TXS_SINGLECOL) != 0)
2196
stats->tx_singlecol++;
2197
if ((flags & AE_TXS_MULTICOL) != 0)
2198
stats->tx_multicol++;
2199
if ((flags & AE_TXS_LATECOL) != 0)
2200
stats->tx_latecol++;
2201
if ((flags & AE_TXS_ABORTCOL) != 0)
2202
stats->tx_abortcol++;
2203
if ((flags & AE_TXS_UNDERRUN) != 0)
2204
stats->tx_underrun++;
2205
}
2206
2207
static void
2208
ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
2209
{
2210
2211
if ((flags & AE_RXD_BCAST) != 0)
2212
stats->rx_bcast++;
2213
if ((flags & AE_RXD_MCAST) != 0)
2214
stats->rx_mcast++;
2215
if ((flags & AE_RXD_PAUSE) != 0)
2216
stats->rx_pause++;
2217
if ((flags & AE_RXD_CTRL) != 0)
2218
stats->rx_ctrl++;
2219
if ((flags & AE_RXD_CRCERR) != 0)
2220
stats->rx_crcerr++;
2221
if ((flags & AE_RXD_CODEERR) != 0)
2222
stats->rx_codeerr++;
2223
if ((flags & AE_RXD_RUNT) != 0)
2224
stats->rx_runt++;
2225
if ((flags & AE_RXD_FRAG) != 0)
2226
stats->rx_frag++;
2227
if ((flags & AE_RXD_TRUNC) != 0)
2228
stats->rx_trunc++;
2229
if ((flags & AE_RXD_ALIGN) != 0)
2230
stats->rx_align++;
2231
}
2232
2233