Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/ath/if_ath_rx.c
39534 views
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5
* All rights reserved.
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer,
12
* without modification.
13
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
14
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
15
* redistribution must be conditioned upon including a substantially
16
* similar Disclaimer requirement for further binary redistribution.
17
*
18
* NO WARRANTY
19
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
22
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
23
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
24
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
27
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
29
* THE POSSIBILITY OF SUCH DAMAGES.
30
*/
31
32
#include <sys/cdefs.h>
33
/*
34
* Driver for the Atheros Wireless LAN controller.
35
*
36
* This software is derived from work of Atsushi Onoe; his contribution
37
* is greatly appreciated.
38
*/
39
40
#include "opt_inet.h"
41
#include "opt_ath.h"
42
/*
43
* This is needed for register operations which are performed
44
* by the driver - eg, calls to ath_hal_gettsf32().
45
*
46
* It's also required for any AH_DEBUG checks in here, eg the
47
* module dependencies.
48
*/
49
#include "opt_ah.h"
50
#include "opt_wlan.h"
51
52
#include <sys/param.h>
53
#include <sys/systm.h>
54
#include <sys/sysctl.h>
55
#include <sys/mbuf.h>
56
#include <sys/malloc.h>
57
#include <sys/lock.h>
58
#include <sys/mutex.h>
59
#include <sys/kernel.h>
60
#include <sys/socket.h>
61
#include <sys/sockio.h>
62
#include <sys/errno.h>
63
#include <sys/callout.h>
64
#include <sys/bus.h>
65
#include <sys/endian.h>
66
#include <sys/kthread.h>
67
#include <sys/taskqueue.h>
68
#include <sys/priv.h>
69
#include <sys/module.h>
70
#include <sys/ktr.h>
71
#include <sys/smp.h> /* for mp_ncpus */
72
73
#include <machine/bus.h>
74
75
#include <net/if.h>
76
#include <net/if_var.h>
77
#include <net/if_dl.h>
78
#include <net/if_media.h>
79
#include <net/if_types.h>
80
#include <net/if_arp.h>
81
#include <net/ethernet.h>
82
#include <net/if_llc.h>
83
84
#include <net80211/ieee80211_var.h>
85
#include <net80211/ieee80211_regdomain.h>
86
#ifdef IEEE80211_SUPPORT_SUPERG
87
#include <net80211/ieee80211_superg.h>
88
#endif
89
#ifdef IEEE80211_SUPPORT_TDMA
90
#include <net80211/ieee80211_tdma.h>
91
#endif
92
93
#include <net/bpf.h>
94
95
#ifdef INET
96
#include <netinet/in.h>
97
#include <netinet/if_ether.h>
98
#endif
99
100
#include <dev/ath/if_athvar.h>
101
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
102
#include <dev/ath/ath_hal/ah_diagcodes.h>
103
104
#include <dev/ath/if_ath_debug.h>
105
#include <dev/ath/if_ath_misc.h>
106
#include <dev/ath/if_ath_tsf.h>
107
#include <dev/ath/if_ath_tx.h>
108
#include <dev/ath/if_ath_sysctl.h>
109
#include <dev/ath/if_ath_led.h>
110
#include <dev/ath/if_ath_keycache.h>
111
#include <dev/ath/if_ath_rx.h>
112
#include <dev/ath/if_ath_beacon.h>
113
#include <dev/ath/if_athdfs.h>
114
#include <dev/ath/if_ath_descdma.h>
115
116
#ifdef ATH_TX99_DIAG
117
#include <dev/ath/ath_tx99/ath_tx99.h>
118
#endif
119
120
#ifdef ATH_DEBUG_ALQ
121
#include <dev/ath/if_ath_alq.h>
122
#endif
123
124
#include <dev/ath/if_ath_lna_div.h>
125
126
/*
127
* Calculate the receive filter according to the
128
* operating mode and state:
129
*
130
* o always accept unicast, broadcast, and multicast traffic
131
* o accept PHY error frames when hardware doesn't have MIB support
132
* to count and we need them for ANI (sta mode only until recently)
133
* and we are not scanning (ANI is disabled)
134
* NB: older hal's add rx filter bits out of sight and we need to
135
* blindly preserve them
136
* o probe request frames are accepted only when operating in
137
* hostap, adhoc, mesh, or monitor modes
138
* o enable promiscuous mode
139
* - when in monitor mode
140
* - if interface marked PROMISC (assumes bridge setting is filtered)
141
* o accept beacons:
142
* - when operating in station mode for collecting rssi data when
143
* the station is otherwise quiet, or
144
* - when operating in adhoc mode so the 802.11 layer creates
145
* node table entries for peers,
146
* - when scanning
147
* - when doing s/w beacon miss (e.g. for ap+sta)
148
* - when operating in ap mode in 11g to detect overlapping bss that
149
* require protection
150
* - when operating in mesh mode to detect neighbors
151
* o accept control frames:
152
* - when in monitor mode
153
* XXX HT protection for 11n
154
*/
155
u_int32_t
156
ath_calcrxfilter(struct ath_softc *sc)
157
{
158
struct ieee80211com *ic = &sc->sc_ic;
159
u_int32_t rfilt;
160
161
rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
162
if (!sc->sc_needmib && !sc->sc_scanning)
163
rfilt |= HAL_RX_FILTER_PHYERR;
164
if (ic->ic_opmode != IEEE80211_M_STA)
165
rfilt |= HAL_RX_FILTER_PROBEREQ;
166
/* XXX ic->ic_monvaps != 0? */
167
if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0)
168
rfilt |= HAL_RX_FILTER_PROM;
169
170
/*
171
* Only listen to all beacons if we're scanning.
172
*
173
* Otherwise we only really need to hear beacons from
174
* our own BSSID.
175
*
176
* IBSS? software beacon miss? Just receive all beacons.
177
* We need to hear beacons/probe requests from everyone so
178
* we can merge ibss.
179
*/
180
if (ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) {
181
rfilt |= HAL_RX_FILTER_BEACON;
182
} else if (ic->ic_opmode == IEEE80211_M_STA) {
183
if (sc->sc_do_mybeacon && ! sc->sc_scanning) {
184
rfilt |= HAL_RX_FILTER_MYBEACON;
185
} else { /* scanning, non-mybeacon chips */
186
rfilt |= HAL_RX_FILTER_BEACON;
187
}
188
}
189
190
/*
191
* NB: We don't recalculate the rx filter when
192
* ic_protmode changes; otherwise we could do
193
* this only when ic_protmode != NONE.
194
*/
195
if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
196
IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
197
rfilt |= HAL_RX_FILTER_BEACON;
198
199
/*
200
* Enable hardware PS-POLL RX only for hostap mode;
201
* STA mode sends PS-POLL frames but never
202
* receives them.
203
*/
204
if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
205
0, NULL) == HAL_OK &&
206
ic->ic_opmode == IEEE80211_M_HOSTAP)
207
rfilt |= HAL_RX_FILTER_PSPOLL;
208
209
if (sc->sc_nmeshvaps) {
210
rfilt |= HAL_RX_FILTER_BEACON;
211
if (sc->sc_hasbmatch)
212
rfilt |= HAL_RX_FILTER_BSSID;
213
else
214
rfilt |= HAL_RX_FILTER_PROM;
215
}
216
if (ic->ic_opmode == IEEE80211_M_MONITOR)
217
rfilt |= HAL_RX_FILTER_CONTROL;
218
219
/*
220
* Enable RX of compressed BAR frames only when doing
221
* 802.11n. Required for A-MPDU.
222
*/
223
if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
224
rfilt |= HAL_RX_FILTER_COMPBAR;
225
226
/*
227
* Enable radar PHY errors if requested by the
228
* DFS module.
229
*/
230
if (sc->sc_dodfs)
231
rfilt |= HAL_RX_FILTER_PHYRADAR;
232
233
/*
234
* Enable spectral PHY errors if requested by the
235
* spectral module.
236
*/
237
if (sc->sc_dospectral)
238
rfilt |= HAL_RX_FILTER_PHYRADAR;
239
240
DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s\n",
241
__func__, rfilt, ieee80211_opmode_name[ic->ic_opmode]);
242
return rfilt;
243
}
244
245
/*
 * Initialise (or re-initialise) a legacy-DMA RX buffer.
 *
 * If the ath_buf has no mbuf attached, allocate a cluster mbuf,
 * load it into the buffer's DMA map and record the single segment.
 * Then sync the map for device reads, program the RX descriptor
 * and append it to the driver's RX descriptor chain (sc_rxlink).
 *
 * Returns 0 on success, ENOMEM if no mbuf/cluster is available, or
 * the bus_dma error code if the map load fails (the mbuf is freed
 * in that case).
 */
static int
ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	/* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		/* Expose the whole cluster to the hardware. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
		    bf->bf_dmamap, m,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		/* A single cluster must map to exactly one DMA segment. */
		KASSERT(bf->bf_nseg == 1,
		    ("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	/* Make the buffer visible to the device before it DMAs into it. */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo).  This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* Chain us onto the previous descriptor, then become the tail. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
328
329
/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 *
 * The net80211 handler saved in av_recv_mgmt is always called first
 * so later processing can rely on state it updates in the node.
 * Beacons additionally drive RSSI filtering, beacon-timer drift
 * diagnostics and (for STA mode) beacon timer resync; beacons fall
 * through into the probe-response case for IBSS merge handling.
 */
void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_softc;
	uint64_t tsf_beacon_old, tsf_beacon;	/* previous/current beacon TSF */
	uint64_t nexttbtt;			/* hardware's next target beacon time */
	int64_t tsf_delta;			/* TSF advance between beacons */
	int32_t tsf_delta_bmiss;		/* delta expressed in beacon intervals */
	int32_t tsf_remainder;			/* signed drift from the interval grid */
	uint64_t tsf_beacon_target;		/* where this beacon "should" have been */
	int tsf_intval;				/* beacon interval in TSF (usec) units */

	/* TSF carried by the previously-heard beacon, little-endian on the wire. */
	tsf_beacon_old = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
	tsf_beacon_old |= le32dec(ni->ni_tstamp.data);

#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
	/* Guard against ni_intval == 0 so the divisions below are safe. */
	tsf_intval = 1;
	if (ni->ni_intval > 0) {
		tsf_intval = TU_TO_TSF(ni->ni_intval);
	}
#undef TU_TO_TSF

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/*
		 * Always update the per-node beacon RSSI if we're hearing
		 * beacons from that node.
		 */
		ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgbrssi, rssi);

		/*
		 * Only do the following processing if it's for
		 * the current BSS.
		 *
		 * In scan and IBSS mode we receive all beacons,
		 * which means we need to filter out stuff
		 * that isn't for us or we'll end up constantly
		 * trying to sync / merge to BSSes that aren't
		 * actually us.
		 */
		if ((vap->iv_opmode != IEEE80211_M_HOSTAP) &&
		    IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) {
			/* update rssi statistics for use by the hal */
			/* XXX unlocked check against vap->iv_bss? */
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);

			/* TSF stamped into the beacon we just received. */
			tsf_beacon = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
			tsf_beacon |= le32dec(ni->ni_tstamp.data);

			nexttbtt = ath_hal_getnexttbtt(sc->sc_ah);

			/*
			 * Let's calculate the delta and remainder, so we can see
			 * if the beacon timer from the AP is varying by more than
			 * a few TU.  (Which would be a huge, huge problem.)
			 */
			tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old;

			tsf_delta_bmiss = tsf_delta / tsf_intval;

			/*
			 * If our delta is greater than half the beacon interval,
			 * let's round the bmiss value up to the next beacon
			 * interval.  Ie, we're running really, really early
			 * on the next beacon.
			 */
			if (tsf_delta % tsf_intval > (tsf_intval / 2))
				tsf_delta_bmiss ++;

			tsf_beacon_target = tsf_beacon_old +
			    (((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval);

			/*
			 * The remainder using '%' is between 0 .. intval-1.
			 * If we're actually running too fast, then the remainder
			 * will be some large number just under intval-1.
			 * So we need to look at whether we're running
			 * before or after the target beacon interval
			 * and if we are, modify how we do the remainder
			 * calculation.
			 */
			if (tsf_beacon < tsf_beacon_target) {
				tsf_remainder =
				    -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval));
			} else {
				tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval;
			}

			DPRINTF(sc, ATH_DEBUG_BEACON, "%s: %s: old_tsf=%llu (%u), new_tsf=%llu (%u), target_tsf=%llu (%u), delta=%lld, bmiss=%d, remainder=%d\n",
			    __func__,
			    ieee80211_get_vap_ifname(vap),
			    (unsigned long long) tsf_beacon_old,
			    (unsigned int) (tsf_beacon_old >> 10),
			    (unsigned long long) tsf_beacon,
			    (unsigned int ) (tsf_beacon >> 10),
			    (unsigned long long) tsf_beacon_target,
			    (unsigned int) (tsf_beacon_target >> 10),
			    (long long) tsf_delta,
			    tsf_delta_bmiss,
			    tsf_remainder);

			DPRINTF(sc, ATH_DEBUG_BEACON, "%s: %s: ni=%6D bssid=%6D tsf=%llu (%u), nexttbtt=%llu (%u), delta=%d\n",
			    __func__,
			    ieee80211_get_vap_ifname(vap),
			    ni->ni_bssid, ":",
			    vap->iv_bss->ni_bssid, ":",
			    (unsigned long long) tsf_beacon,
			    (unsigned int) (tsf_beacon >> 10),
			    (unsigned long long) nexttbtt,
			    (unsigned int) (nexttbtt >> 10),
			    (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval);

			/*
			 * We only do syncbeacon on STA VAPs; not on IBSS;
			 * but don't do it with swbmiss enabled or we
			 * may end up overwriting AP mode beacon config.
			 *
			 * The driver (and net80211) should be smarter about
			 * this..
			 */
			if (vap->iv_opmode == IEEE80211_M_STA &&
			    sc->sc_syncbeacon &&
			    (!sc->sc_swbmiss) &&
			    ni == vap->iv_bss &&
			    ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) &&
			    (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) {
				DPRINTF(sc, ATH_DEBUG_BEACON,
				    "%s: syncbeacon=1; syncing\n",
				    __func__);
				/*
				 * Resync beacon timers using the tsf of the beacon
				 * frame we just received.
				 */
				ath_beacon_config(sc, vap);
				sc->sc_syncbeacon = 0;
			}
		}

		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN &&
		    ieee80211_ibss_merge_check(ni)) {
			/* Extend the 32-bit RX timestamp to a full 64-bit TSF. */
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(sc, rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}
507
508
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
/*
 * Populate the vendor radiotap extension in the cached RX radiotap
 * header from the given RX status: per-chain RSSI, EVM, hardware
 * rate, RX status flags and any PHY error code.
 */
static void
ath_rx_tap_vendor(struct ath_softc *sc, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
	int chain;

	/* Fill in the extension bitmap */
	sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER);

	/* Vendor OUI (0x7f:0x03:0x00). */
	sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f;
	sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03;
	sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00;

	/* XXX what should this be? */
	sc->sc_rx_th.wr_vh.vh_sub_ns = 0;
	sc->sc_rx_th.wr_vh.vh_skip_len =
	    htole16(sizeof(struct ath_radiotap_vendor_hdr));

	/* General version info */
	sc->sc_rx_th.wr_v.vh_version = 1;
	sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask;

	/* Per-chain control / extension channel RSSI. */
	for (chain = 0; chain < 3; chain++) {
		sc->sc_rx_th.wr_v.rssi_ctl[chain] = rs->rs_rssi_ctl[chain];
		sc->sc_rx_th.wr_v.rssi_ext[chain] = rs->rs_rssi_ext[chain];
	}

	/* EVM; entries 3 and 4 are only populated from the AR9300 or later. */
	sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0;
	sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1;
	sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2;
	sc->sc_rx_th.wr_v.evm[3] = rs->rs_evm3;
	sc->sc_rx_th.wr_v.evm[4] = rs->rs_evm4;

	/* Direction and aggregation state. */
	sc->sc_rx_th.wr_v.vh_flags = ATH_VENDOR_PKT_RX;
	if (rs->rs_isaggr)
		sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_ISAGGR;
	if (rs->rs_moreaggr)
		sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_MOREAGGR;

	/* Raw hardware RX rate and descriptor flags. */
	sc->sc_rx_th.wr_v.vh_rx_hwrate = rs->rs_rate;
	sc->sc_rx_th.wr_v.vh_rs_flags = rs->rs_flags;

	/* phyerr info; 0xff marks "no PHY error". */
	if (rs->rs_status & HAL_RXERR_PHY) {
		sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr;
		sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_RXPHYERR;
	} else {
		sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff;
	}
	sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status;
	sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi;
}
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
573
574
static void
575
ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
576
const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
577
{
578
#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
579
#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
580
#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
581
#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
582
const HAL_RATE_TABLE *rt;
583
uint8_t rix;
584
585
rt = sc->sc_currates;
586
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
587
rix = rt->rateCodeToIndex[rs->rs_rate];
588
sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
589
sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
590
591
/* 802.11 specific flags */
592
sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
593
if (rs->rs_status & HAL_RXERR_PHY) {
594
/*
595
* PHY error - make sure the channel flags
596
* reflect the actual channel configuration,
597
* not the received frame.
598
*/
599
if (IEEE80211_IS_CHAN_HT40U(sc->sc_curchan))
600
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
601
else if (IEEE80211_IS_CHAN_HT40D(sc->sc_curchan))
602
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
603
else if (IEEE80211_IS_CHAN_HT20(sc->sc_curchan))
604
sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
605
} else if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
606
struct ieee80211com *ic = &sc->sc_ic;
607
608
if ((rs->rs_flags & HAL_RX_2040) == 0)
609
sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
610
else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
611
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
612
else
613
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
614
615
if (rs->rs_flags & HAL_RX_GI)
616
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
617
}
618
619
sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
620
if (rs->rs_status & HAL_RXERR_CRC)
621
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
622
/* XXX propagate other error flags from descriptor */
623
sc->sc_rx_th.wr_antnoise = nf;
624
sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
625
sc->sc_rx_th.wr_antenna = rs->rs_antenna;
626
#undef CHAN_HT
627
#undef CHAN_HT20
628
#undef CHAN_HT40U
629
#undef CHAN_HT40D
630
}
631
632
static void
633
ath_handle_micerror(struct ieee80211com *ic,
634
struct ieee80211_frame *wh, int keyix)
635
{
636
struct ieee80211_node *ni;
637
638
/* XXX recheck MIC to deal w/ chips that lie */
639
/* XXX discard MIC errors on !data frames */
640
ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
641
if (ni != NULL) {
642
ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
643
ieee80211_free_node(ni);
644
}
645
}
646
647
/*
648
* Process a single packet.
649
*
650
* The mbuf must already be synced, unmapped and removed from bf->bf_m
651
* by this stage.
652
*
653
* The mbuf must be consumed by this routine - either passed up the
654
* net80211 stack, put on the holding queue, or freed.
655
*/
656
int
657
ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
658
uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
659
struct mbuf *m)
660
{
661
uint64_t rstamp;
662
/* XXX TODO: make this an mbuf tag? */
663
struct ieee80211_rx_stats rxs;
664
int len, type, i;
665
struct ieee80211com *ic = &sc->sc_ic;
666
struct ieee80211_node *ni;
667
int is_good = 0;
668
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
669
670
/*
671
* Calculate the correct 64 bit TSF given
672
* the TSF64 register value and rs_tstamp.
673
*/
674
rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
675
676
/* 802.11 return codes - These aren't specifically errors */
677
if (rs->rs_flags & HAL_RX_GI)
678
sc->sc_stats.ast_rx_halfgi++;
679
if (rs->rs_flags & HAL_RX_2040)
680
sc->sc_stats.ast_rx_2040++;
681
if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
682
sc->sc_stats.ast_rx_pre_crc_err++;
683
if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
684
sc->sc_stats.ast_rx_post_crc_err++;
685
if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
686
sc->sc_stats.ast_rx_decrypt_busy_err++;
687
if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
688
sc->sc_stats.ast_rx_hi_rx_chain++;
689
if (rs->rs_flags & HAL_RX_STBC)
690
sc->sc_stats.ast_rx_stbc++;
691
692
if (rs->rs_status != 0) {
693
if (rs->rs_status & HAL_RXERR_CRC)
694
sc->sc_stats.ast_rx_crcerr++;
695
if (rs->rs_status & HAL_RXERR_FIFO)
696
sc->sc_stats.ast_rx_fifoerr++;
697
if (rs->rs_status & HAL_RXERR_PHY) {
698
sc->sc_stats.ast_rx_phyerr++;
699
/* Process DFS radar events */
700
if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
701
(rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
702
/* Now pass it to the radar processing code */
703
ath_dfs_process_phy_err(sc, m, rstamp, rs);
704
}
705
706
/*
707
* Be suitably paranoid about receiving phy errors
708
* out of the stats array bounds
709
*/
710
if (rs->rs_phyerr < ATH_IOCTL_STATS_NUM_RX_PHYERR)
711
sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
712
goto rx_error; /* NB: don't count in ierrors */
713
}
714
if (rs->rs_status & HAL_RXERR_DECRYPT) {
715
/*
716
* Decrypt error. If the error occurred
717
* because there was no hardware key, then
718
* let the frame through so the upper layers
719
* can process it. This is necessary for 5210
720
* parts which have no way to setup a ``clear''
721
* key cache entry.
722
*
723
* XXX do key cache faulting
724
*/
725
if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
726
goto rx_accept;
727
sc->sc_stats.ast_rx_badcrypt++;
728
}
729
/*
730
* Similar as above - if the failure was a keymiss
731
* just punt it up to the upper layers for now.
732
*/
733
if (rs->rs_status & HAL_RXERR_KEYMISS) {
734
sc->sc_stats.ast_rx_keymiss++;
735
goto rx_accept;
736
}
737
if (rs->rs_status & HAL_RXERR_MIC) {
738
sc->sc_stats.ast_rx_badmic++;
739
/*
740
* Do minimal work required to hand off
741
* the 802.11 header for notification.
742
*/
743
/* XXX frag's and qos frames */
744
len = rs->rs_datalen;
745
if (len >= sizeof (struct ieee80211_frame)) {
746
ath_handle_micerror(ic,
747
mtod(m, struct ieee80211_frame *),
748
sc->sc_splitmic ?
749
rs->rs_keyix-32 : rs->rs_keyix);
750
}
751
}
752
counter_u64_add(ic->ic_ierrors, 1);
753
rx_error:
754
/*
755
* Cleanup any pending partial frame.
756
*/
757
if (re->m_rxpending != NULL) {
758
m_freem(re->m_rxpending);
759
re->m_rxpending = NULL;
760
}
761
/*
762
* When a tap is present pass error frames
763
* that have been requested. By default we
764
* pass decrypt+mic errors but others may be
765
* interesting (e.g. crc).
766
*/
767
if (ieee80211_radiotap_active(ic) &&
768
(rs->rs_status & sc->sc_monpass)) {
769
/* NB: bpf needs the mbuf length setup */
770
len = rs->rs_datalen;
771
m->m_pkthdr.len = m->m_len = len;
772
ath_rx_tap(sc, m, rs, rstamp, nf);
773
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
774
ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
775
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
776
ieee80211_radiotap_rx_all(ic, m);
777
}
778
/* XXX pass MIC errors up for s/w reclaculation */
779
m_freem(m); m = NULL;
780
goto rx_next;
781
}
782
rx_accept:
783
len = rs->rs_datalen;
784
m->m_len = len;
785
786
if (rs->rs_more) {
787
/*
788
* Frame spans multiple descriptors; save
789
* it for the next completed descriptor, it
790
* will be used to construct a jumbogram.
791
*/
792
if (re->m_rxpending != NULL) {
793
/* NB: max frame size is currently 2 clusters */
794
sc->sc_stats.ast_rx_toobig++;
795
m_freem(re->m_rxpending);
796
}
797
m->m_pkthdr.len = len;
798
re->m_rxpending = m;
799
m = NULL;
800
goto rx_next;
801
} else if (re->m_rxpending != NULL) {
802
/*
803
* This is the second part of a jumbogram,
804
* chain it to the first mbuf, adjust the
805
* frame length, and clear the rxpending state.
806
*/
807
re->m_rxpending->m_next = m;
808
re->m_rxpending->m_pkthdr.len += len;
809
m = re->m_rxpending;
810
re->m_rxpending = NULL;
811
} else {
812
/*
813
* Normal single-descriptor receive; setup packet length.
814
*/
815
m->m_pkthdr.len = len;
816
}
817
818
/*
819
* Validate rs->rs_antenna.
820
*
821
* Some users w/ AR9285 NICs have reported crashes
822
* here because rs_antenna field is bogusly large.
823
* Let's enforce the maximum antenna limit of 8
824
* (and it shouldn't be hard coded, but that's a
825
* separate problem) and if there's an issue, print
826
* out an error and adjust rs_antenna to something
827
* sensible.
828
*
829
* This code should be removed once the actual
830
* root cause of the issue has been identified.
831
* For example, it may be that the rs_antenna
832
* field is only valid for the last frame of
833
* an aggregate and it just happens that it is
834
* "mostly" right. (This is a general statement -
835
* the majority of the statistics are only valid
836
* for the last frame in an aggregate.
837
*/
838
if (rs->rs_antenna >= ATH_IOCTL_STATS_NUM_RX_ANTENNA) {
839
device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
840
__func__, rs->rs_antenna);
841
#ifdef ATH_DEBUG
842
ath_printrxbuf(sc, bf, 0, status == HAL_OK);
843
#endif /* ATH_DEBUG */
844
rs->rs_antenna = 0; /* XXX better than nothing */
845
}
846
847
/*
848
* If this is an AR9285/AR9485, then the receive and LNA
849
* configuration is stored in RSSI[2] / EXTRSSI[2].
850
* We can extract this out to build a much better
851
* receive antenna profile.
852
*
853
* Yes, this just blurts over the above RX antenna field
854
* for now. It's fine, the AR9285 doesn't really use
855
* that.
856
*
857
* Later on we should store away the fine grained LNA
858
* information and keep separate counters just for
859
* that. It'll help when debugging the AR9285/AR9485
860
* combined diversity code.
861
*/
862
if (sc->sc_rx_lnamixer) {
863
rs->rs_antenna = 0;
864
865
/* Bits 0:1 - the LNA configuration used */
866
rs->rs_antenna |=
867
((rs->rs_rssi_ctl[2] & HAL_RX_LNA_CFG_USED)
868
>> HAL_RX_LNA_CFG_USED_S);
869
870
/* Bit 2 - the external RX antenna switch */
871
if (rs->rs_rssi_ctl[2] & HAL_RX_LNA_EXTCFG)
872
rs->rs_antenna |= 0x4;
873
}
874
875
sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
876
877
/*
878
* Populate the rx status block. When there are bpf
879
* listeners we do the additional work to provide
880
* complete status. Otherwise we fill in only the
881
* material required by ieee80211_input. Note that
882
* noise setting is filled in above.
883
*/
884
if (ieee80211_radiotap_active(ic)) {
885
ath_rx_tap(sc, m, rs, rstamp, nf);
886
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
887
ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
888
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
889
}
890
891
/*
892
* From this point on we assume the frame is at least
893
* as large as ieee80211_frame_min; verify that.
894
*/
895
if (len < IEEE80211_MIN_LEN) {
896
if (!ieee80211_radiotap_active(ic)) {
897
DPRINTF(sc, ATH_DEBUG_RECV,
898
"%s: short packet %d\n", __func__, len);
899
sc->sc_stats.ast_rx_tooshort++;
900
} else {
901
/* NB: in particular this captures ack's */
902
ieee80211_radiotap_rx_all(ic, m);
903
}
904
m_freem(m); m = NULL;
905
goto rx_next;
906
}
907
908
if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
909
const HAL_RATE_TABLE *rt = sc->sc_currates;
910
uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
911
912
ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
913
sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
914
}
915
916
m_adj(m, -IEEE80211_CRC_LEN);
917
918
/*
919
* Locate the node for sender, track state, and then
920
* pass the (referenced) node up to the 802.11 layer
921
* for its use.
922
*/
923
ni = ieee80211_find_rxnode_withkey(ic,
924
mtod(m, const struct ieee80211_frame_min *),
925
rs->rs_keyix == HAL_RXKEYIX_INVALID ?
926
IEEE80211_KEYIX_NONE : rs->rs_keyix);
927
sc->sc_lastrs = rs;
928
929
if (rs->rs_isaggr)
930
sc->sc_stats.ast_rx_agg++;
931
932
/*
933
* Populate the per-chain RSSI values where appropriate.
934
*/
935
bzero(&rxs, sizeof(rxs));
936
rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI |
937
IEEE80211_R_C_CHAIN |
938
IEEE80211_R_C_NF |
939
IEEE80211_R_C_RSSI |
940
IEEE80211_R_TSF64 |
941
IEEE80211_R_TSF_START; /* XXX TODO: validate */
942
rxs.c_rssi = rs->rs_rssi;
943
rxs.c_nf = nf;
944
rxs.c_chain = 3; /* XXX TODO: check */
945
rxs.c_rx_tsf = rstamp;
946
947
for (i = 0; i < 3; i++) {
948
rxs.c_rssi_ctl[i] = rs->rs_rssi_ctl[i];
949
rxs.c_rssi_ext[i] = rs->rs_rssi_ext[i];
950
/*
951
* XXX note: we currently don't track
952
* per-chain noisefloor.
953
*/
954
rxs.c_nf_ctl[i] = nf;
955
rxs.c_nf_ext[i] = nf;
956
}
957
958
if (ni != NULL) {
959
/*
960
* Only punt packets for ampdu reorder processing for
961
* 11n nodes; net80211 enforces that M_AMPDU is only
962
* set for 11n nodes.
963
*/
964
if (ni->ni_flags & IEEE80211_NODE_HT)
965
m->m_flags |= M_AMPDU;
966
967
/*
968
* Inform rate control about the received RSSI.
969
* It can then use this information to potentially drastically
970
* alter the available rate based on the RSSI estimate.
971
*
972
* This is super important when associating to a far away station;
973
* you don't want to waste time trying higher rates at some low
974
* packet exchange rate (like during DHCP) just to establish
975
* that higher MCS rates aren't available.
976
*/
977
ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgrssi,
978
rs->rs_rssi);
979
ath_rate_update_rx_rssi(sc, ATH_NODE(ni),
980
ATH_RSSI(ATH_NODE(ni)->an_node_stats.ns_avgrssi));
981
982
/*
983
* Sending station is known, dispatch directly.
984
*/
985
(void) ieee80211_add_rx_params(m, &rxs);
986
type = ieee80211_input_mimo(ni, m);
987
ieee80211_free_node(ni);
988
m = NULL;
989
/*
990
* Arrange to update the last rx timestamp only for
991
* frames from our ap when operating in station mode.
992
* This assumes the rx key is always setup when
993
* associated.
994
*/
995
if (ic->ic_opmode == IEEE80211_M_STA &&
996
rs->rs_keyix != HAL_RXKEYIX_INVALID)
997
is_good = 1;
998
} else {
999
(void) ieee80211_add_rx_params(m, &rxs);
1000
type = ieee80211_input_mimo_all(ic, m);
1001
m = NULL;
1002
}
1003
1004
/*
1005
* At this point we have passed the frame up the stack; thus
1006
* the mbuf is no longer ours.
1007
*/
1008
1009
/*
1010
* Track legacy station RX rssi and do any rx antenna management.
1011
*/
1012
ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
1013
if (sc->sc_diversity) {
1014
/*
1015
* When using fast diversity, change the default rx
1016
* antenna if diversity chooses the other antenna 3
1017
* times in a row.
1018
*/
1019
if (sc->sc_defant != rs->rs_antenna) {
1020
if (++sc->sc_rxotherant >= 3)
1021
ath_setdefantenna(sc, rs->rs_antenna);
1022
} else
1023
sc->sc_rxotherant = 0;
1024
}
1025
1026
/* Handle slow diversity if enabled */
1027
if (sc->sc_dolnadiv) {
1028
ath_lna_rx_comb_scan(sc, rs, ticks, hz);
1029
}
1030
1031
if (sc->sc_softled) {
1032
/*
1033
* Blink for any data frame. Otherwise do a
1034
* heartbeat-style blink when idle. The latter
1035
* is mainly for station mode where we depend on
1036
* periodic beacon frames to trigger the poll event.
1037
*/
1038
if (type == IEEE80211_FC0_TYPE_DATA) {
1039
const HAL_RATE_TABLE *rt = sc->sc_currates;
1040
ath_led_event(sc,
1041
rt->rateCodeToIndex[rs->rs_rate]);
1042
} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
1043
ath_led_event(sc, 0);
1044
}
1045
rx_next:
1046
/*
1047
* Debugging - complain if we didn't NULL the mbuf pointer
1048
* here.
1049
*/
1050
if (m != NULL) {
1051
device_printf(sc->sc_dev,
1052
"%s: mbuf %p should've been freed!\n",
1053
__func__,
1054
m);
1055
}
1056
return (is_good);
1057
}
1058
1059
#define	ATH_RX_MAX		128

/*
 * XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like
 * the EDMA code does.
 *
 * XXX TODO: then, do all of the RX list management stuff inside
 * ATH_RX_LOCK() so we don't end up potentially racing.  The EDMA
 * code is doing it right.
 */
/*
 * Process completed frames from the legacy (non-EDMA) RX descriptor list.
 *
 * Walks sc_rxbuf from the head, asks the HAL whether each descriptor has
 * completed, and hands each completed frame to ath_rx_pkt().  Completed
 * buffers are parked in the per-queue "holding" slot before being
 * re-chained, because the hardware may still read the old descriptor's
 * link pointer (see the comment at the bottom of the loop.)
 *
 * resched: non-zero when called from the RX tasklet; enables the DFS
 * task, the kickpcu recovery path and (if compiled in) fast-frame ageing.
 * The flush path passes 0 to suppress all of those.
 */
static void
ath_rx_proc(struct ath_softc *sc, int resched)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_buf *bf;
	struct ath_hal *ah = sc->sc_ah;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = &sc->sc_ic;
#endif
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	int ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;
	int npkts = 0;
	int kickpcu = 0;
	int ret;

	/* XXX we must not hold the ATH_LOCK here */
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	/*
	 * Mark ourselves running and snapshot the kickpcu request under
	 * the PCU lock; the flag itself is cleared later, once handled.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	kickpcu = sc->sc_kickpcu;
	ATH_PCU_UNLOCK(sc);

	/* Wake the hardware up for the duration of RX processing. */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
	ngood = 0;
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		/*
		 * Don't process too many packets at a time; give the
		 * TX thread time to also run - otherwise the TX
		 * latency can jump by quite a bit, causing throughput
		 * degradation.  A pending kickpcu overrides the cap so
		 * the queue is fully drained before the PCU restart.
		 */
		if (!kickpcu && npkts >= ATH_RX_MAX)
			break;

		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		if (sc->sc_rxslink && bf == NULL) {	/* NB: shouldn't happen */
			device_printf(sc->sc_dev, "%s: no buffer!\n", __func__);
			break;
		} else if (bf == NULL) {
			/*
			 * End of List:
			 * this can happen for non-self-linked RX chains
			 */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			device_printf(sc->sc_dev, "%s: no mbuf!\n", __func__);
			TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
			goto rx_proc_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif

#ifdef	ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
		    if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
		    sc->sc_rx_statuslen, (char *) ds);
#endif	/* ATH_DEBUG_ALQ */

		/* Hardware hasn't finished this descriptor yet; stop here. */
		if (status == HAL_EINPROGRESS)
			break;

		TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
		npkts++;

		/*
		 * Process a single frame.  The DMA map is synced and
		 * unloaded before the mbuf is handed off; ath_rx_pkt()
		 * now owns (and will free or pass up) the mbuf.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m))
			ngood++;
rx_proc_next:
		/*
		 * If there's a holding buffer, insert that onto
		 * the RX list; the hardware is now definitely not pointing
		 * to it now.
		 */
		ret = 0;
		if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) {
			TAILQ_INSERT_TAIL(&sc->sc_rxbuf,
			    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf,
			    bf_list);
			ret = ath_rxbuf_init(sc,
			    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf);
		}
		/*
		 * Next, throw our buffer into the holding entry.  The hardware
		 * may use the descriptor to read the link pointer before
		 * DMAing the next descriptor in to write out a packet.
		 */
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf;
	} while (ret == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	ATH_KTR(sc, ATH_KTR_RXPROC, 2, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
	/* Queue DFS tasklet if needed */
	if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	/*
	 * Now that all the RX frames were handled that
	 * need to be handled, kick the PCU if there's
	 * been an RXEOL condition.
	 */
	if (resched && kickpcu) {
		ATH_PCU_LOCK(sc);
		ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_rx_proc: kickpcu");
		device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
		    __func__, npkts);

		/*
		 * Go through the process of fully tearing down
		 * the RX buffers and reinitialising them.
		 *
		 * There's a hardware bug that causes the RX FIFO
		 * to get confused under certain conditions and
		 * constantly write over the same frame, leading
		 * the RX driver code here to get heavily confused.
		 */
		/*
		 * XXX Has RX DMA stopped enough here to just call
		 *     ath_startrecv()?
		 * XXX Do we need to use the holding buffer to restart
		 *     RX DMA by appending entries to the final
		 *     descriptor?  Quite likely.
		 */
#if 1
		ath_startrecv(sc);
#else
		/*
		 * Disabled for now - it'd be nice to be able to do
		 * this in order to limit the amount of CPU time spent
		 * reinitialising the RX side (and thus minimise RX
		 * drops) however there's a hardware issue that
		 * causes things to get too far out of whack.
		 */
		/*
		 * XXX can we hold the PCU lock here?
		 * Are there any net80211 buffer calls involved?
		 */
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
		ath_hal_rxena(ah);		/* enable recv descriptors */
		ath_mode_init(sc);		/* set filters, etc. */
		ath_hal_startpcurecv(ah, (!! sc->sc_scanning));	/* re-enable PCU/DMA engine */
#endif

		/* Re-enable interrupts and clear the kickpcu request. */
		ath_hal_intrset(ah, sc->sc_imask);
		sc->sc_kickpcu = 0;
		ATH_PCU_UNLOCK(sc);
	}

#ifdef IEEE80211_SUPPORT_SUPERG
	if (resched)
		ieee80211_ff_age_all(ic, 100);
#endif

	/*
	 * Put the hardware to sleep again if we're done with it.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/*
	 * If we hit the maximum number of frames in this round,
	 * reschedule for another immediate pass.  This gives
	 * the TX and TX completion routines time to run, which
	 * will reduce latency.
	 */
	if (npkts >= ATH_RX_MAX)
		sc->sc_rx.recv_sched(sc, resched);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}
#undef	PA2DESC
#undef	ATH_RX_MAX
1300
1301
/*
1302
* Only run the RX proc if it's not already running.
1303
* Since this may get run as part of the reset/flush path,
1304
* the task can't clash with an existing, running tasklet.
1305
*/
1306
static void
1307
ath_legacy_rx_tasklet(void *arg, int npending)
1308
{
1309
struct ath_softc *sc = arg;
1310
1311
ATH_KTR(sc, ATH_KTR_RXPROC, 1, "ath_rx_proc: pending=%d", npending);
1312
DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
1313
ATH_PCU_LOCK(sc);
1314
if (sc->sc_inreset_cnt > 0) {
1315
device_printf(sc->sc_dev,
1316
"%s: sc_inreset_cnt > 0; skipping\n", __func__);
1317
ATH_PCU_UNLOCK(sc);
1318
return;
1319
}
1320
ATH_PCU_UNLOCK(sc);
1321
1322
ath_rx_proc(sc, 1);
1323
}
1324
1325
/*
 * Flush the pending legacy RX queue.
 *
 * Runs the RX processing loop with resched=0, which suppresses the
 * DFS task, the kickpcu recovery path and any rescheduling.
 */
static void
ath_legacy_flushrecv(struct ath_softc *sc)
{

	ath_rx_proc(sc, 0);
}
1331
1332
static void
1333
ath_legacy_flush_rxpending(struct ath_softc *sc)
1334
{
1335
1336
/* XXX ATH_RX_LOCK_ASSERT(sc); */
1337
1338
if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
1339
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
1340
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
1341
}
1342
if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
1343
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
1344
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
1345
}
1346
}
1347
1348
static int
1349
ath_legacy_flush_rxholdbf(struct ath_softc *sc)
1350
{
1351
struct ath_buf *bf;
1352
1353
/* XXX ATH_RX_LOCK_ASSERT(sc); */
1354
/*
1355
* If there are RX holding buffers, free them here and return
1356
* them to the list.
1357
*
1358
* XXX should just verify that bf->bf_m is NULL, as it must
1359
* be at this point!
1360
*/
1361
bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf;
1362
if (bf != NULL) {
1363
if (bf->bf_m != NULL)
1364
m_freem(bf->bf_m);
1365
bf->bf_m = NULL;
1366
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1367
(void) ath_rxbuf_init(sc, bf);
1368
}
1369
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL;
1370
1371
bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf;
1372
if (bf != NULL) {
1373
if (bf->bf_m != NULL)
1374
m_freem(bf->bf_m);
1375
bf->bf_m = NULL;
1376
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1377
(void) ath_rxbuf_init(sc, bf);
1378
}
1379
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL;
1380
1381
return (0);
1382
}
1383
1384
/*
 * Disable the receive h/w in preparation for a reset.
 *
 * Shuts down the PCU, clears the RX filter and stops the RX DMA engine,
 * then (optionally, under ATH_DEBUG) dumps the RX descriptor chain and
 * releases the pending/holding RX buffers.
 *
 * NB: dodelay is currently ignored (see the #if 0 below) - the 3ms
 * DELAY() is always performed.
 */
static void
ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	/*
	 * TODO: see if this particular DELAY() is required; it may be
	 * masking some missing FIFO flush or DMA sync.
	 */
#if 0
	if (dodelay)
#endif
		DELAY(3000);	/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		device_printf(sc->sc_dev,
		    "%s: rx queue %p, link %p\n",
		    __func__,
		    (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah, HAL_RX_QUEUE_HP),
		    sc->sc_rxlink);
		ix = 0;
		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif

	/* Release any pending mbufs and holding buffers. */
	(void) ath_legacy_flush_rxpending(sc);
	(void) ath_legacy_flush_rxholdbf(sc);

	sc->sc_rxlink = NULL;		/* just in case */

	ATH_RX_UNLOCK(sc);
#undef PA2DESC
}
1439
1440
/*
1441
* XXX TODO: something was calling startrecv without calling
1442
* stoprecv. Let's figure out what/why. It was showing up
1443
* as a mbuf leak (rxpending) and ath_buf leak (holdbf.)
1444
*/
1445
1446
/*
1447
* Enable the receive h/w following a reset.
1448
*/
1449
static int
1450
ath_legacy_startrecv(struct ath_softc *sc)
1451
{
1452
struct ath_hal *ah = sc->sc_ah;
1453
struct ath_buf *bf;
1454
1455
ATH_RX_LOCK(sc);
1456
1457
/*
1458
* XXX should verify these are already all NULL!
1459
*/
1460
sc->sc_rxlink = NULL;
1461
(void) ath_legacy_flush_rxpending(sc);
1462
(void) ath_legacy_flush_rxholdbf(sc);
1463
1464
/*
1465
* Re-chain all of the buffers in the RX buffer list.
1466
*/
1467
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1468
int error = ath_rxbuf_init(sc, bf);
1469
if (error != 0) {
1470
DPRINTF(sc, ATH_DEBUG_RECV,
1471
"%s: ath_rxbuf_init failed %d\n",
1472
__func__, error);
1473
return error;
1474
}
1475
}
1476
1477
bf = TAILQ_FIRST(&sc->sc_rxbuf);
1478
ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
1479
ath_hal_rxena(ah); /* enable recv descriptors */
1480
ath_mode_init(sc); /* set filters, etc. */
1481
ath_hal_startpcurecv(ah, (!! sc->sc_scanning)); /* re-enable PCU/DMA engine */
1482
1483
ATH_RX_UNLOCK(sc);
1484
return 0;
1485
}
1486
1487
static int
1488
ath_legacy_dma_rxsetup(struct ath_softc *sc)
1489
{
1490
int error;
1491
1492
error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
1493
"rx", sizeof(struct ath_desc), ath_rxbuf, 1);
1494
if (error != 0)
1495
return (error);
1496
1497
return (0);
1498
}
1499
1500
static int
1501
ath_legacy_dma_rxteardown(struct ath_softc *sc)
1502
{
1503
1504
if (sc->sc_rxdma.dd_desc_len != 0)
1505
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
1506
return (0);
1507
}
1508
1509
/*
 * Schedule the legacy RX task.
 *
 * NB: dosched is ignored here; the RX task is always enqueued.
 */
static void
ath_legacy_recv_sched(struct ath_softc *sc, int dosched)
{

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
1515
1516
/*
 * Schedule the legacy RX task for a specific RX queue.
 *
 * The legacy path has only a single RX task, so both the queue id
 * and dosched are ignored; the task is always enqueued.
 */
static void
ath_legacy_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE q,
    int dosched)
{

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
1523
1524
void
1525
ath_recv_setup_legacy(struct ath_softc *sc)
1526
{
1527
1528
/* Sensible legacy defaults */
1529
/*
1530
* XXX this should be changed to properly support the
1531
* exact RX descriptor size for each HAL.
1532
*/
1533
sc->sc_rx_statuslen = sizeof(struct ath_desc);
1534
1535
sc->sc_rx.recv_start = ath_legacy_startrecv;
1536
sc->sc_rx.recv_stop = ath_legacy_stoprecv;
1537
sc->sc_rx.recv_flush = ath_legacy_flushrecv;
1538
sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet;
1539
sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init;
1540
1541
sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup;
1542
sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown;
1543
sc->sc_rx.recv_sched = ath_legacy_recv_sched;
1544
sc->sc_rx.recv_sched_queue = ath_legacy_recv_sched_queue;
1545
}
1546
1547