GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/ath/if_ath_tx.c
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5
* Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
6
* All rights reserved.
7
*
8
* Redistribution and use in source and binary forms, with or without
9
* modification, are permitted provided that the following conditions
10
* are met:
11
* 1. Redistributions of source code must retain the above copyright
12
* notice, this list of conditions and the following disclaimer,
13
* without modification.
14
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
15
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16
* redistribution must be conditioned upon including a substantially
17
* similar Disclaimer requirement for further binary redistribution.
18
*
19
* NO WARRANTY
20
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30
* THE POSSIBILITY OF SUCH DAMAGES.
31
*/
32
33
#include <sys/cdefs.h>
34
/*
35
* Driver for the Atheros Wireless LAN controller.
36
*
37
* This software is derived from work of Atsushi Onoe; his contribution
38
* is greatly appreciated.
39
*/
40
41
#include "opt_inet.h"
42
#include "opt_ath.h"
43
#include "opt_wlan.h"
44
45
#include <sys/param.h>
46
#include <sys/systm.h>
47
#include <sys/sysctl.h>
48
#include <sys/mbuf.h>
49
#include <sys/malloc.h>
50
#include <sys/lock.h>
51
#include <sys/mutex.h>
52
#include <sys/kernel.h>
53
#include <sys/socket.h>
54
#include <sys/sockio.h>
55
#include <sys/errno.h>
56
#include <sys/callout.h>
57
#include <sys/bus.h>
58
#include <sys/endian.h>
59
#include <sys/kthread.h>
60
#include <sys/taskqueue.h>
61
#include <sys/priv.h>
62
#include <sys/ktr.h>
63
64
#include <machine/bus.h>
65
66
#include <net/if.h>
67
#include <net/if_var.h>
68
#include <net/if_dl.h>
69
#include <net/if_media.h>
70
#include <net/if_types.h>
71
#include <net/if_arp.h>
72
#include <net/ethernet.h>
73
#include <net/if_llc.h>
74
75
#include <net80211/ieee80211_var.h>
76
#include <net80211/ieee80211_regdomain.h>
77
#ifdef IEEE80211_SUPPORT_SUPERG
78
#include <net80211/ieee80211_superg.h>
79
#endif
80
#ifdef IEEE80211_SUPPORT_TDMA
81
#include <net80211/ieee80211_tdma.h>
82
#endif
83
#include <net80211/ieee80211_ht.h>
84
85
#include <net/bpf.h>
86
87
#ifdef INET
88
#include <netinet/in.h>
89
#include <netinet/if_ether.h>
90
#endif
91
92
#include <dev/ath/if_athvar.h>
93
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
94
#include <dev/ath/ath_hal/ah_diagcodes.h>
95
96
#include <dev/ath/if_ath_debug.h>
97
98
#ifdef ATH_TX99_DIAG
99
#include <dev/ath/ath_tx99/ath_tx99.h>
100
#endif
101
102
#include <dev/ath/if_ath_misc.h>
103
#include <dev/ath/if_ath_tx.h>
104
#include <dev/ath/if_ath_tx_ht.h>
105
106
#ifdef ATH_DEBUG_ALQ
107
#include <dev/ath/if_ath_alq.h>
108
#endif
109
110
/*
111
* How many retries to perform in software
112
*/
113
#define SWMAX_RETRIES 10
114
115
/*
116
* What queue to throw the non-QoS TID traffic into
117
*/
118
#define ATH_NONQOS_TID_AC WME_AC_VO
119
120
#if 0
121
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
122
#endif
123
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
124
int tid);
125
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
126
int tid);
127
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
128
struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
129
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
130
struct ieee80211_node *ni, struct mbuf *m0, int *tid);
131
static struct ath_buf *
132
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
133
struct ath_tid *tid, struct ath_buf *bf);
134
135
#ifdef ATH_DEBUG_ALQ
136
void
137
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
138
{
139
struct ath_buf *bf;
140
int i, n;
141
const char *ds;
142
143
/* XXX we should skip out early if debugging isn't enabled! */
144
bf = bf_first;
145
146
while (bf != NULL) {
147
/* XXX should ensure bf_nseg > 0! */
148
if (bf->bf_nseg == 0)
149
break;
150
n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
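/*
* (The calculation above is a ceiling division: with sc_tx_nmaps
* buffer pointers per descriptor - e.g. 4 on EDMA chips - a
* 5-segment frame needs ((5 - 1) / 4) + 1 = 2 descriptors.)
*/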
151
for (i = 0, ds = (const char *) bf->bf_desc;
152
i < n;
153
i++, ds += sc->sc_tx_desclen) {
154
if_ath_alq_post(&sc->sc_alq,
155
ATH_ALQ_EDMA_TXDESC,
156
sc->sc_tx_desclen,
157
ds);
158
}
159
bf = bf->bf_next;
160
}
161
}
162
#endif /* ATH_DEBUG_ALQ */
163
164
/*
165
* Whether to use the 11n rate scenario functions or not
166
*/
167
static inline int
168
ath_tx_is_11n(struct ath_softc *sc)
169
{
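/*
* Note: these magic values are believed to be the AR5416-family and
* AR9300-family HAL magics respectively; those are the 11n-capable
* HALs, hence this check.
*/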
170
return ((sc->sc_ah->ah_magic == 0x20065416) ||
171
(sc->sc_ah->ah_magic == 0x19741014));
172
}
173
174
/*
175
* Obtain the current TID from the given frame.
176
*
177
* Non-QoS frames get mapped to a TID so frames consistently
178
* go on a sensible queue.
179
*/
180
static int
181
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
182
{
183
const struct ieee80211_frame *wh;
184
185
wh = mtod(m0, const struct ieee80211_frame *);
186
187
/* Non-QoS: map frame to a TID queue for software queueing */
188
if (! IEEE80211_QOS_HAS_SEQ(wh))
189
return (WME_AC_TO_TID(M_WME_GETAC(m0)));
190
191
/* QoS - fetch the TID from the header, ignore mbuf WME */
192
return (ieee80211_gettid(wh));
193
}
194
195
static void
196
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
197
{
198
struct ieee80211_frame *wh;
199
200
wh = mtod(bf->bf_m, struct ieee80211_frame *);
201
/* Only update the header and re-sync DMA on the first retry */
202
if (bf->bf_state.bfs_isretried == 0) {
203
wh->i_fc[1] |= IEEE80211_FC1_RETRY;
204
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
205
BUS_DMASYNC_PREWRITE);
206
}
207
bf->bf_state.bfs_isretried = 1;
208
bf->bf_state.bfs_retries ++;
209
}
210
211
/*
212
* Determine what the correct AC queue for the given frame
213
* should be.
214
*
215
* For QoS frames, obey the TID. That way things like
216
* management frames that are related to a given TID
217
* are thus serialised with the rest of the TID traffic,
218
* regardless of net80211 overriding priority.
219
*
220
* For non-QoS frames, return the mbuf WME priority.
221
*
222
* One implication is that higher priority non-QoS traffic
223
* may end up being scheduled before other non-QoS traffic,
224
* leading to out-of-sequence packets being emitted.
225
*
226
* (It'd be nice to log/count this so we can see if it
227
* really is a problem.)
228
*
229
* TODO: maybe we should throw multicast traffic, QoS or
230
* otherwise, into a separate TX queue?
231
*/
232
static int
233
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
234
{
235
const struct ieee80211_frame *wh;
236
237
wh = mtod(m0, const struct ieee80211_frame *);
238
239
/*
240
* QoS data frame (sequence number or otherwise) -
241
* return hardware queue mapping for the underlying
242
* TID.
243
*/
244
if (IEEE80211_QOS_HAS_SEQ(wh))
245
return TID_TO_WME_AC(ieee80211_gettid(wh));
246
247
/*
248
* Otherwise - return mbuf QoS pri.
249
*/
250
return (M_WME_GETAC(m0));
251
}
252
253
void
254
ath_txfrag_cleanup(struct ath_softc *sc,
255
ath_bufhead *frags, struct ieee80211_node *ni)
256
{
257
struct ath_buf *bf, *next;
258
259
ATH_TXBUF_LOCK_ASSERT(sc);
260
261
TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
262
/* NB: bf assumed clean */
263
TAILQ_REMOVE(frags, bf, bf_list);
264
ath_returnbuf_head(sc, bf);
265
ieee80211_node_decref(ni);
266
}
267
}
268
269
/*
270
* Setup xmit of a fragmented frame. Allocate a buffer
271
* for each frag and bump the node reference count to
272
* reflect the held reference to be setup by ath_tx_start.
273
*/
274
int
275
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
276
struct mbuf *m0, struct ieee80211_node *ni)
277
{
278
struct mbuf *m;
279
struct ath_buf *bf;
280
281
ATH_TXBUF_LOCK(sc);
282
for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
283
/* XXX non-management? */
284
bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
285
if (bf == NULL) { /* out of buffers, cleanup */
286
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
287
__func__);
288
ath_txfrag_cleanup(sc, frags, ni);
289
break;
290
}
291
(void) ieee80211_ref_node(ni);
292
TAILQ_INSERT_TAIL(frags, bf, bf_list);
293
}
294
ATH_TXBUF_UNLOCK(sc);
295
296
return !TAILQ_EMPTY(frags);
297
}
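/*
* Illustrative caller pattern for the two helpers above (a sketch
* only, not verbatim driver code):
*
*	ath_bufhead frags;
*
*	TAILQ_INIT(&frags);
*	if (m0->m_nextpkt != NULL &&
*	    !ath_txfrag_setup(sc, &frags, m0, ni)) {
*		(out of ath_bufs - free the whole fragment chain)
*	}
*	(transmit m0, then each fragment, using the bufs from 'frags';
*	 on error ath_txfrag_cleanup() - with ATH_TXBUF_LOCK held -
*	 returns the bufs and drops the node references taken here)
*/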
298
299
static int
300
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
301
{
302
struct mbuf *m;
303
int error;
304
305
/*
306
* Load the DMA map so any coalescing is done. This
307
* also calculates the number of descriptors we need.
308
*/
309
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
310
bf->bf_segs, &bf->bf_nseg,
311
BUS_DMA_NOWAIT);
312
if (error == EFBIG) {
313
/* XXX packet requires too many descriptors */
314
bf->bf_nseg = ATH_MAX_SCATTER + 1;
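/*
* (Forcing bf_nseg past ATH_MAX_SCATTER here makes the check
* below take the m_collapse() path instead of treating EFBIG
* as a fatal error.)
*/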
315
} else if (error != 0) {
316
sc->sc_stats.ast_tx_busdma++;
317
ieee80211_free_mbuf(m0);
318
return error;
319
}
320
/*
321
* Discard null packets and check for packets that
322
* require too many TX descriptors. We try to convert
323
* the latter to a cluster.
324
*/
325
if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */
326
sc->sc_stats.ast_tx_linear++;
327
m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
328
if (m == NULL) {
329
ieee80211_free_mbuf(m0);
330
sc->sc_stats.ast_tx_nombuf++;
331
return ENOMEM;
332
}
333
m0 = m;
334
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
335
bf->bf_segs, &bf->bf_nseg,
336
BUS_DMA_NOWAIT);
337
if (error != 0) {
338
sc->sc_stats.ast_tx_busdma++;
339
ieee80211_free_mbuf(m0);
340
return error;
341
}
342
KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
343
("too many segments after defrag; nseg %u", bf->bf_nseg));
344
} else if (bf->bf_nseg == 0) { /* null packet, discard */
345
sc->sc_stats.ast_tx_nodata++;
346
ieee80211_free_mbuf(m0);
347
return EIO;
348
}
349
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
350
__func__, m0, m0->m_pkthdr.len);
351
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
352
bf->bf_m = m0;
353
354
return 0;
355
}
356
357
/*
358
* Chain together segments+descriptors for a frame - 11n or otherwise.
359
*
360
* For aggregates, this is called on each frame in the aggregate.
361
*/
362
static void
363
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
364
struct ath_buf *bf, bool is_aggr, int is_first_subframe,
365
int is_last_subframe)
366
{
367
struct ath_hal *ah = sc->sc_ah;
368
char *ds;
369
int i, bp, dsp;
370
HAL_DMA_ADDR bufAddrList[4];
371
uint32_t segLenList[4];
372
int numTxMaps = 1;
373
int isFirstDesc = 1;
374
375
/*
376
* XXX There's txdma and txdma_mgmt; the descriptor
377
* sizes must match.
378
*/
379
struct ath_descdma *dd = &sc->sc_txdma;
380
381
/*
382
* Fill in the remainder of the descriptor info.
383
*/
384
385
/*
386
* We need the number of TX data pointers in each descriptor.
387
* EDMA and later chips support 4 TX buffers per descriptor;
388
* previous chips just support one.
389
*/
390
numTxMaps = sc->sc_tx_nmaps;
391
392
/*
393
* For EDMA and later chips ensure the TX map is fully populated
394
* before advancing to the next descriptor.
395
*/
396
ds = (char *) bf->bf_desc;
397
bp = dsp = 0;
398
bzero(bufAddrList, sizeof(bufAddrList));
399
bzero(segLenList, sizeof(segLenList));
400
for (i = 0; i < bf->bf_nseg; i++) {
401
bufAddrList[bp] = bf->bf_segs[i].ds_addr;
402
segLenList[bp] = bf->bf_segs[i].ds_len;
403
bp++;
404
405
/*
406
* Go to the next segment if this isn't the last segment
407
* and there's space in the current TX map.
408
*/
409
if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
410
continue;
411
412
/*
413
* Last segment or we're out of buffer pointers.
414
*/
415
bp = 0;
416
417
if (i == bf->bf_nseg - 1)
418
ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
419
else
420
ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
421
bf->bf_daddr + dd->dd_descsize * (dsp + 1));
422
423
/*
424
* XXX This assumes that bfs_txq is the actual destination
425
* hardware queue at this point. It may not have been
426
* assigned, it may actually be pointing to the multicast
427
* software TXQ id. These must be fixed!
428
*/
429
ath_hal_filltxdesc(ah, (struct ath_desc *) ds
430
, bufAddrList
431
, segLenList
432
, bf->bf_descid /* XXX desc id */
433
, bf->bf_state.bfs_tx_queue
434
, isFirstDesc /* first segment */
435
, i == bf->bf_nseg - 1 /* last segment */
436
, (struct ath_desc *) ds0 /* first descriptor */
437
);
438
439
/*
440
* Make sure the 11n aggregate fields are cleared.
441
*
442
* XXX TODO: this doesn't need to be called for
443
* aggregate frames, as it'll be called on all
444
* sub-frames. Since the descriptors are in
445
* non-cacheable memory, this leads to some
446
* rather slow writes on MIPS/ARM platforms.
447
*/
448
if (ath_tx_is_11n(sc))
449
ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
450
451
/*
452
* If 11n is enabled, set it up as if it's an aggregate
453
* frame.
454
*/
455
if (is_last_subframe) {
456
ath_hal_set11n_aggr_last(sc->sc_ah,
457
(struct ath_desc *) ds);
458
} else if (is_aggr) {
459
/*
460
* This clears the aggrlen field; so
461
* the caller needs to call set_aggr_first()!
462
*
463
* XXX TODO: don't call this for the first
464
* descriptor in the first frame in an
465
* aggregate!
466
*/
467
ath_hal_set11n_aggr_middle(sc->sc_ah,
468
(struct ath_desc *) ds,
469
bf->bf_state.bfs_ndelim);
470
}
471
isFirstDesc = 0;
472
bf->bf_lastds = (struct ath_desc *) ds;
473
474
/*
475
* Don't forget to skip to the next descriptor.
476
*/
477
ds += sc->sc_tx_desclen;
478
dsp++;
479
480
/*
481
* .. and don't forget to blank these out!
482
*/
483
bzero(bufAddrList, sizeof(bufAddrList));
484
bzero(segLenList, sizeof(segLenList));
485
}
486
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
487
}
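/*
* Worked example for the loop above: a 6-segment frame on an EDMA
* chip (4 buffer pointers per descriptor) is packed into two
* descriptors - segments 0..3 in the first, 4..5 in the second -
* while a pre-EDMA chip (1 pointer per descriptor) chains six
* descriptors via ath_hal_settxdesclink().
*/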
488
489
/*
490
* Set the rate control fields in the given descriptor based on
491
* the bf_state fields and node state.
492
*
493
* The bfs fields should already be set with the relevant rate
494
* control information, including whether MRR is to be enabled.
495
*
496
* Since the FreeBSD HAL currently sets up the first TX rate
497
* in ath_hal_setuptxdesc(), this will setup the MRR
498
* conditionally for the pre-11n chips, and call ath_buf_set_rate
499
* unconditionally for 11n chips. These require the 11n rate
500
* scenario to be set if MCS rates are enabled, so it's easier
501
* to just always call it. The caller then only needs to fill in the
502
* remaining MRR entries (rc[1]..rc[3]) if multi-rate retry is needed.
503
*/
504
static void
505
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
506
struct ath_buf *bf)
507
{
508
struct ath_rc_series *rc = bf->bf_state.bfs_rc;
509
510
/* If mrr is disabled, blank tries 1, 2, 3 */
511
if (! bf->bf_state.bfs_ismrr)
512
rc[1].tries = rc[2].tries = rc[3].tries = 0;
513
514
#if 0
515
/*
516
* If NOACK is set, just set ntries=1.
517
*/
518
else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
519
rc[1].tries = rc[2].tries = rc[3].tries = 0;
520
rc[0].tries = 1;
521
}
522
#endif
523
524
/*
525
* Always call - that way a retried descriptor will
526
* have the MRR fields overwritten.
527
*
528
* XXX TODO: see if this is really needed - setting up
529
* the first descriptor should set the MRR fields to 0
530
* for us anyway.
531
*/
532
if (ath_tx_is_11n(sc)) {
533
ath_buf_set_rate(sc, ni, bf);
534
} else {
535
ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
536
, rc[1].ratecode, rc[1].tries
537
, rc[2].ratecode, rc[2].tries
538
, rc[3].ratecode, rc[3].tries
539
);
540
}
541
}
542
543
/*
544
* Setup segments+descriptors for an 11n aggregate.
545
* bf_first is the first buffer in the aggregate.
546
* The descriptor list must already been linked together using
547
* bf->bf_next.
548
*/
549
static void
550
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
551
{
552
struct ath_buf *bf, *bf_prev = NULL;
553
struct ath_desc *ds0 = bf_first->bf_desc;
554
555
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
556
__func__, bf_first->bf_state.bfs_nframes,
557
bf_first->bf_state.bfs_al);
558
559
bf = bf_first;
560
561
if (bf->bf_state.bfs_txrate0 == 0)
562
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
563
__func__, bf, 0);
564
if (bf->bf_state.bfs_rc[0].ratecode == 0)
565
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
566
__func__, bf, 0);
567
568
/*
569
* Setup all descriptors of all subframes - this will
570
* call ath_hal_set11naggrmiddle() on every frame.
571
*/
572
while (bf != NULL) {
573
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
574
"%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
575
__func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
576
SEQNO(bf->bf_state.bfs_seqno));
577
578
/*
579
* Setup the initial fields for the first descriptor - all
580
* the non-11n specific stuff.
581
*/
582
ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
583
, bf->bf_state.bfs_pktlen /* packet length */
584
, bf->bf_state.bfs_hdrlen /* header length */
585
, bf->bf_state.bfs_atype /* Atheros packet type */
586
, bf->bf_state.bfs_txpower /* txpower */
587
, bf->bf_state.bfs_txrate0
588
, bf->bf_state.bfs_try0 /* series 0 rate/tries */
589
, bf->bf_state.bfs_keyix /* key cache index */
590
, bf->bf_state.bfs_txantenna /* antenna mode */
591
, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */
592
, bf->bf_state.bfs_ctsrate /* rts/cts rate */
593
, bf->bf_state.bfs_ctsduration /* rts/cts duration */
594
);
595
596
/*
597
* First descriptor? Setup the rate control and initial
598
* aggregate header information.
599
*/
600
if (bf == bf_first) {
601
/*
602
* setup first desc with rate and aggr info
603
*/
604
ath_tx_set_ratectrl(sc, bf->bf_node, bf);
605
}
606
607
/*
608
* Setup the descriptors for a multi-descriptor frame.
609
* This is both aggregate and non-aggregate aware.
610
*/
611
ath_tx_chaindesclist(sc, ds0, bf,
612
1, /* is_aggr */
613
!! (bf == bf_first), /* is_first_subframe */
614
!! (bf->bf_next == NULL) /* is_last_subframe */
615
);
616
617
if (bf == bf_first) {
618
/*
619
* Initialise the first 11n aggregate with the
620
* aggregate length and aggregate enable bits.
621
*/
622
ath_hal_set11n_aggr_first(sc->sc_ah,
623
ds0,
624
bf->bf_state.bfs_al,
625
bf->bf_state.bfs_ndelim);
626
}
627
628
/*
629
* Link the last descriptor of the previous frame
630
* to the beginning descriptor of this frame.
631
*/
632
if (bf_prev != NULL)
633
ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
634
bf->bf_daddr);
635
636
/* Save a copy so we can link the next descriptor in */
637
bf_prev = bf;
638
bf = bf->bf_next;
639
}
640
641
/*
642
* Set the first descriptor bf_lastds field to point to
643
* the last descriptor in the last subframe, that's where
644
* the status update will occur.
645
*/
646
bf_first->bf_lastds = bf_prev->bf_lastds;
647
648
/*
649
* And bf_last in the first descriptor points to the end of
650
* the aggregate list.
651
*/
652
bf_first->bf_last = bf_prev;
653
654
/*
655
* For non-AR9300 NICs, which require the rate control
656
* in the final descriptor - let's set that up now.
657
*
658
* This is because the filltxdesc() HAL call doesn't
659
* populate the last segment with rate control information
660
* if firstSeg is also true. For non-aggregate frames
661
* that is fine, as the first frame already has rate control
662
* info. But if the last frame in an aggregate has one
663
* descriptor, both firstseg and lastseg will be true and
664
* the rate info isn't copied.
665
*
666
* This is inefficient on MIPS/ARM platforms that have
667
* non-cacheable memory for TX descriptors, but we'll just
668
* make do for now.
669
*
670
* As to why the rate table is stashed in the last descriptor
671
* rather than the first descriptor? Because proctxdesc()
672
* is called on the final descriptor in an MPDU or A-MPDU -
673
* ie, the one that gets updated by the hardware upon
674
* completion. That way proctxdesc() doesn't need to know
675
* about the first _and_ last TX descriptor.
676
*/
677
ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
678
679
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
680
}
681
682
/*
683
* Hand-off a frame to the multicast TX queue.
684
*
685
* This is a software TXQ which will be appended to the CAB queue
686
* during the beacon setup code.
687
*
688
* XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
689
* as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
690
* with the actual hardware txq, or all of this will fall apart.
691
*
692
* XXX It may not be a bad idea to just stuff the QCU ID into bf_state
693
* and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
694
* correctly.
695
*/
696
static void
697
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
698
struct ath_buf *bf)
699
{
700
ATH_TX_LOCK_ASSERT(sc);
701
702
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
703
("%s: busy status 0x%x", __func__, bf->bf_flags));
704
705
/*
706
* Ensure that the tx queue is the cabq, so things get
707
* mapped correctly.
708
*/
709
if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
710
DPRINTF(sc, ATH_DEBUG_XMIT,
711
"%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
712
__func__, bf, bf->bf_state.bfs_tx_queue,
713
txq->axq_qnum);
714
}
715
716
ATH_TXQ_LOCK(txq);
717
if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
718
struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
719
struct ieee80211_frame *wh;
720
721
/* mark previous frame */
722
wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
723
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
724
bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
725
BUS_DMASYNC_PREWRITE);
726
727
/* link descriptor */
728
ath_hal_settxdesclink(sc->sc_ah,
729
bf_last->bf_lastds,
730
bf->bf_daddr);
731
}
732
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
733
ATH_TXQ_UNLOCK(txq);
734
}
735
736
/*
737
* Hand-off packet to a hardware queue.
738
*/
739
static void
740
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
741
struct ath_buf *bf)
742
{
743
struct ath_hal *ah = sc->sc_ah;
744
struct ath_buf *bf_first;
745
746
/*
747
* Insert the frame on the outbound list and pass it on
748
* to the hardware. Multicast frames buffered for power
749
* save stations and transmit from the CAB queue are stored
750
* on a s/w only queue and loaded on to the CAB queue in
751
* the SWBA handler since frames only go out on DTIM and
752
* to avoid possible races.
753
*/
754
ATH_TX_LOCK_ASSERT(sc);
755
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
756
("%s: busy status 0x%x", __func__, bf->bf_flags));
757
KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
758
("ath_tx_handoff_hw called for mcast queue"));
759
760
/*
761
* XXX We should instead just verify that sc_txstart_cnt
762
* or ath_txproc_cnt > 0. That would mean that
763
* the reset is going to be waiting for us to complete.
764
*/
765
if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
766
device_printf(sc->sc_dev,
767
"%s: TX dispatch without holding txcount/txstart refcnt!\n",
768
__func__);
769
}
770
771
/*
772
* XXX .. this is going to cause the hardware to get upset;
773
* so we really should find some way to drop or queue
774
* things.
775
*/
776
777
ATH_TXQ_LOCK(txq);
778
779
/*
780
* XXX TODO: if there's a holdingbf, then
781
* ATH_TXQ_PUTRUNNING should be clear.
782
*
783
* If there is a holdingbf and the list is empty,
784
* then axq_link should be pointing to the holdingbf.
785
*
786
* Otherwise it should point to the last descriptor
787
* in the last ath_buf.
788
*
789
* In any case, we should really ensure that we
790
* update the previous descriptor link pointer to
791
* this descriptor, regardless of all of the above state.
792
*
793
* For now this is captured by having axq_link point
794
* to either the holdingbf (if the TXQ list is empty)
795
* or the end of the list (if the TXQ list isn't empty.)
796
* I'd rather just kill axq_link here and do it as above.
797
*/
798
799
/*
800
* Append the frame to the TX queue.
801
*/
802
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
803
ATH_KTR(sc, ATH_KTR_TX, 3,
804
"ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
805
"depth=%d",
806
txq->axq_qnum,
807
bf,
808
txq->axq_depth);
809
810
/*
811
* If there's a link pointer, update it.
812
*
813
* XXX we should replace this with the above logic, just
814
* to kill axq_link with fire.
815
*/
816
if (txq->axq_link != NULL) {
817
*txq->axq_link = bf->bf_daddr;
818
DPRINTF(sc, ATH_DEBUG_XMIT,
819
"%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
820
txq->axq_qnum, txq->axq_link,
821
(caddr_t)bf->bf_daddr, bf->bf_desc,
822
txq->axq_depth);
823
ATH_KTR(sc, ATH_KTR_TX, 5,
824
"ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
825
"lastds=%d",
826
txq->axq_qnum, txq->axq_link,
827
(caddr_t)bf->bf_daddr, bf->bf_desc,
828
bf->bf_lastds);
829
}
830
831
/*
832
* If we've not pushed anything into the hardware yet,
833
* push the head of the queue into the TxDP.
834
*
835
* Once we've started DMA, there's no guarantee that
836
* updating the TxDP with a new value will actually work.
837
* So we just don't do that - if we hit the end of the list,
838
* we keep that buffer around (the "holding buffer") and
839
* re-start DMA by updating the link pointer of _that_
840
* descriptor and then restart DMA.
841
*/
842
if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
843
bf_first = TAILQ_FIRST(&txq->axq_q);
844
txq->axq_flags |= ATH_TXQ_PUTRUNNING;
845
ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
846
DPRINTF(sc, ATH_DEBUG_XMIT,
847
"%s: TXDP[%u] = %p (%p) depth %d\n",
848
__func__, txq->axq_qnum,
849
(caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
850
txq->axq_depth);
851
ATH_KTR(sc, ATH_KTR_TX, 5,
852
"ath_tx_handoff: TXDP[%u] = %p (%p) "
853
"lastds=%p depth %d",
854
txq->axq_qnum,
855
(caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
856
bf_first->bf_lastds,
857
txq->axq_depth);
858
}
859
860
/*
861
* Ensure that the bf TXQ matches this TXQ, so later
862
* checking and holding buffer manipulation is sane.
863
*/
864
if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
865
DPRINTF(sc, ATH_DEBUG_XMIT,
866
"%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
867
__func__, bf, bf->bf_state.bfs_tx_queue,
868
txq->axq_qnum);
869
}
870
871
/*
872
* Track aggregate queue depth.
873
*/
874
if (bf->bf_state.bfs_aggr)
875
txq->axq_aggr_depth++;
876
877
/*
878
* Update the link pointer.
879
*/
880
ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
881
882
/*
883
* Start DMA.
884
*
885
* If we wrote a TxDP above, DMA will start from here.
886
*
887
* If DMA is running, it'll do nothing.
888
*
889
* If the DMA engine hit the end of the QCU list (ie LINK=NULL,
890
* or VEOL) then it stops at the last transmitted write.
891
* We then append a new frame by updating the link pointer
892
* in that descriptor and then kick TxE here; it will re-read
893
* that last descriptor and find the new descriptor to transmit.
894
*
895
* This is why we keep the holding descriptor around.
896
*/
897
ath_hal_txstart(ah, txq->axq_qnum);
898
ATH_TXQ_UNLOCK(txq);
899
ATH_KTR(sc, ATH_KTR_TX, 1,
900
"ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
901
}
902
903
/*
904
* Restart TX DMA for the given TXQ.
905
*
906
* This must be called whether the queue is empty or not.
907
*/
908
static void
909
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
910
{
911
struct ath_buf *bf, *bf_last;
912
913
ATH_TXQ_LOCK_ASSERT(txq);
914
915
/* XXX make this ATH_TXQ_FIRST */
916
bf = TAILQ_FIRST(&txq->axq_q);
917
bf_last = ATH_TXQ_LAST(txq, axq_q_s);
918
919
if (bf == NULL)
920
return;
921
922
DPRINTF(sc, ATH_DEBUG_RESET,
923
"%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
924
__func__,
925
txq->axq_qnum,
926
bf,
927
bf_last,
928
(uint32_t) bf->bf_daddr);
929
930
#ifdef ATH_DEBUG
931
if (sc->sc_debug & ATH_DEBUG_RESET)
932
ath_tx_dump(sc, txq);
933
#endif
934
935
/*
936
* This is called from a restart, so DMA is known to be
937
* completely stopped.
938
*/
939
KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
940
("%s: Q%d: called with PUTRUNNING=1\n",
941
__func__,
942
txq->axq_qnum));
943
944
ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
945
txq->axq_flags |= ATH_TXQ_PUTRUNNING;
946
947
ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
948
&txq->axq_link);
949
ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
950
}
951
952
/*
953
* Hand off a packet to the hardware (or mcast queue.)
954
*
955
* The relevant hardware txq should be locked.
956
*/
957
static void
958
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
959
struct ath_buf *bf)
960
{
961
ATH_TX_LOCK_ASSERT(sc);
962
963
#ifdef ATH_DEBUG_ALQ
964
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
965
ath_tx_alq_post(sc, bf);
966
#endif
967
968
if (txq->axq_qnum == ATH_TXQ_SWQ)
969
ath_tx_handoff_mcast(sc, txq, bf);
970
else
971
ath_tx_handoff_hw(sc, txq, bf);
972
}
973
974
/*
975
* Setup a frame for encryption.
976
*
977
* If this fails, zero is returned and the mbuf must be freed by
978
* the caller.
979
*/
980
static int
981
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
982
struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
983
int *keyix)
984
{
985
DPRINTF(sc, ATH_DEBUG_XMIT,
986
"%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
987
__func__,
988
*hdrlen,
989
*pktlen,
990
isfrag,
991
iswep,
992
m0);
993
994
if (iswep) {
995
const struct ieee80211_cipher *cip;
996
struct ieee80211_key *k;
997
998
/*
999
* Construct the 802.11 header+trailer for an encrypted
1000
* frame. The only reason this can fail is because of an
1001
* unknown or unsupported cipher/key type.
1002
*/
1003
k = ieee80211_crypto_encap(ni, m0);
1004
if (k == NULL) {
1005
/*
1006
* This can happen when the key is yanked after the
1007
* frame was queued. Just discard the frame; the
1008
* 802.11 layer counts failures and provides
1009
* debugging/diagnostics.
1010
*/
1011
return (0);
1012
}
1013
/*
1014
* Adjust the packet + header lengths for the crypto
1015
* additions and calculate the h/w key index. When
1016
* a s/w mic is done the frame will have had any mic
1017
* added to it prior to entry so m0->m_pkthdr.len will
1018
* account for it. Otherwise we need to add it to the
1019
* packet length.
1020
*/
1021
cip = k->wk_cipher;
1022
(*hdrlen) += cip->ic_header;
1023
(*pktlen) += cip->ic_header + cip->ic_trailer;
1024
/* NB: frags always have any TKIP MIC done in s/w */
1025
if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1026
(*pktlen) += cip->ic_miclen;
1027
(*keyix) = k->wk_keyix;
1028
} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1029
/*
1030
* Use station key cache slot, if assigned.
1031
*/
1032
(*keyix) = ni->ni_ucastkey.wk_keyix;
1033
if ((*keyix) == IEEE80211_KEYIX_NONE)
1034
(*keyix) = HAL_TXKEYIX_INVALID;
1035
} else
1036
(*keyix) = HAL_TXKEYIX_INVALID;
1037
1038
return (1);
1039
}
1040
1041
/*
1042
* Calculate whether interoperability protection is required for
1043
* this frame.
1044
*
1045
* This requires the rate control information be filled in,
1046
* as the protection requirement depends upon the current
1047
* operating mode / PHY.
1048
*/
1049
static void
1050
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1051
{
1052
struct ieee80211_frame *wh;
1053
uint8_t rix;
1054
uint16_t flags;
1055
int shortPreamble;
1056
const HAL_RATE_TABLE *rt = sc->sc_currates;
1057
struct ieee80211com *ic = &sc->sc_ic;
1058
1059
flags = bf->bf_state.bfs_txflags;
1060
rix = bf->bf_state.bfs_rc[0].rix;
1061
shortPreamble = bf->bf_state.bfs_shpream;
1062
wh = mtod(bf->bf_m, struct ieee80211_frame *);
1063
1064
/* Disable frame protection for TOA probe frames */
1065
if (bf->bf_flags & ATH_BUF_TOA_PROBE) {
1066
/* XXX count */
1067
flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA);
1068
bf->bf_state.bfs_doprot = 0;
1069
goto finish;
1070
}
1071
1072
/*
1073
* If 802.11g protection is enabled, determine whether
1074
* to use RTS/CTS or just CTS. Note that this is only
1075
* done for OFDM unicast frames.
1076
*/
1077
if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1078
rt->info[rix].phy == IEEE80211_T_OFDM &&
1079
(flags & HAL_TXDESC_NOACK) == 0) {
1080
bf->bf_state.bfs_doprot = 1;
1081
/* XXX fragments must use CCK rates w/ protection */
1082
if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1083
flags |= HAL_TXDESC_RTSENA;
1084
} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1085
flags |= HAL_TXDESC_CTSENA;
1086
}
1087
/*
1088
* For frags it would be desirable to use the
1089
* highest CCK rate for RTS/CTS. But stations
1090
* farther away may detect it at a lower CCK rate
1091
* so use the configured protection rate instead
1092
* (for now).
1093
*/
1094
sc->sc_stats.ast_tx_protect++;
1095
}
1096
1097
/*
1098
* If 11n protection is enabled and it's a HT frame,
1099
* enable RTS.
1100
*
1101
* XXX ic_htprotmode or ic_curhtprotmode?
1102
* XXX should ic_htprotmode only matter if ic_curhtprotmode
1103
* XXX indicates it's not a HT pure environment?
1104
*/
1105
if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1106
rt->info[rix].phy == IEEE80211_T_HT &&
1107
(flags & HAL_TXDESC_NOACK) == 0) {
1108
flags |= HAL_TXDESC_RTSENA;
1109
sc->sc_stats.ast_tx_htprotect++;
1110
}
1111
1112
finish:
1113
bf->bf_state.bfs_txflags = flags;
1114
}
1115
1116
/*
1117
* Update the frame duration given the currently selected rate.
1118
*
1119
* This rewrites the duration field in the 802.11 header, so the
1120
* frame will require a DMA flush (re-sync) before transmission.
1121
*/
1122
static void
1123
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1124
{
1125
struct ieee80211_frame *wh;
1126
uint8_t rix;
1127
uint16_t flags;
1128
int shortPreamble;
1129
struct ath_hal *ah = sc->sc_ah;
1130
const HAL_RATE_TABLE *rt = sc->sc_currates;
1131
int isfrag = bf->bf_m->m_flags & M_FRAG;
1132
1133
flags = bf->bf_state.bfs_txflags;
1134
rix = bf->bf_state.bfs_rc[0].rix;
1135
shortPreamble = bf->bf_state.bfs_shpream;
1136
wh = mtod(bf->bf_m, struct ieee80211_frame *);
1137
1138
/*
1139
* Calculate duration. This logically belongs in the 802.11
1140
* layer but it lacks sufficient information to calculate it.
1141
*/
1142
if ((flags & HAL_TXDESC_NOACK) == 0 && !IEEE80211_IS_CTL(wh)) {
1143
u_int16_t dur;
1144
if (shortPreamble)
1145
dur = rt->info[rix].spAckDuration;
1146
else
1147
dur = rt->info[rix].lpAckDuration;
1148
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1149
dur += dur; /* additional SIFS+ACK */
1150
/*
1151
* Include the size of next fragment so NAV is
1152
* updated properly. The last fragment uses only
1153
* the ACK duration
1154
*
1155
* XXX TODO: ensure that the rate lookup for each
1156
* fragment is the same as the rate used by the
1157
* first fragment!
1158
*/
1159
dur += ath_hal_computetxtime(ah,
1160
rt,
1161
bf->bf_nextfraglen,
1162
rix, shortPreamble,
1163
AH_TRUE);
1164
}
1165
if (isfrag) {
1166
/*
1167
* Force hardware to use computed duration for next
1168
* fragment by disabling multi-rate retry which updates
1169
* duration based on the multi-rate duration table.
1170
*/
1171
bf->bf_state.bfs_ismrr = 0;
1172
bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1173
/* XXX update bfs_rc[0].try? */
1174
}
1175
1176
/* Update the duration field itself */
1177
*(u_int16_t *)wh->i_dur = htole16(dur);
1178
}
1179
}
1180
1181
static uint8_t
1182
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1183
int cix, int shortPreamble)
1184
{
1185
uint8_t ctsrate;
1186
1187
/*
1188
* CTS transmit rate is derived from the transmit rate
1189
* by looking in the h/w rate table. We must also factor
1190
* in whether or not a short preamble is to be used.
1191
*/
1192
/* NB: cix is set above where RTS/CTS is enabled */
1193
KASSERT(cix != 0xff, ("cix not setup"));
1194
ctsrate = rt->info[cix].rateCode;
1195
1196
/* XXX this should only matter for legacy rates */
1197
if (shortPreamble)
1198
ctsrate |= rt->info[cix].shortPreamble;
1199
1200
return (ctsrate);
1201
}
1202
1203
/*
1204
* Calculate the RTS/CTS duration for legacy frames.
1205
*/
1206
static int
1207
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1208
int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1209
int flags)
1210
{
1211
int ctsduration = 0;
1212
1213
/* This mustn't be called for HT modes */
1214
if (rt->info[cix].phy == IEEE80211_T_HT) {
1215
printf("%s: HT rate where it shouldn't be (0x%x)\n",
1216
__func__, rt->info[cix].rateCode);
1217
return (-1);
1218
}
1219
1220
/*
1221
* Compute the transmit duration based on the frame
1222
* size and the size of an ACK frame. We call into the
1223
* HAL to do the computation since it depends on the
1224
* characteristics of the actual PHY being used.
1225
*
1226
* NB: CTS is assumed the same size as an ACK so we can
1227
* use the precalculated ACK durations.
1228
*/
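/*
* In short, what follows computes:
*
*	ctsduration = [SIFS + CTS, if RTS is enabled]
*	    + txtime(frame at rate rix)
*	    + [SIFS + ACK, unless NOACK is set]
*
* using the short- or long-preamble ACK/CTS durations as appropriate.
*/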
1229
if (shortPreamble) {
1230
if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1231
ctsduration += rt->info[cix].spAckDuration;
1232
ctsduration += ath_hal_computetxtime(ah,
1233
rt, pktlen, rix, AH_TRUE, AH_TRUE);
1234
if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1235
ctsduration += rt->info[rix].spAckDuration;
1236
} else {
1237
if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1238
ctsduration += rt->info[cix].lpAckDuration;
1239
ctsduration += ath_hal_computetxtime(ah,
1240
rt, pktlen, rix, AH_FALSE, AH_TRUE);
1241
if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1242
ctsduration += rt->info[rix].lpAckDuration;
1243
}
1244
1245
return (ctsduration);
1246
}
1247
1248
/*
1249
* Update the given ath_buf with updated rts/cts setup and duration
1250
* values.
1251
*
1252
* To support rate lookups for each software retry, the rts/cts rate
1253
* and cts duration must be re-calculated.
1254
*
1255
* This function assumes the RTS/CTS flags have been set as needed;
1256
* mrr has been disabled; and the rate control lookup has been done.
1257
*
1258
* XXX TODO: MRR need only be disabled for the pre-11n NICs.
1259
* XXX The 11n NICs support per-rate RTS/CTS configuration.
1260
*/
1261
static void
1262
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1263
{
1264
uint16_t ctsduration = 0;
1265
uint8_t ctsrate = 0;
1266
uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1267
uint8_t cix = 0;
1268
const HAL_RATE_TABLE *rt = sc->sc_currates;
1269
1270
/*
1271
* No RTS/CTS enabled? Don't bother.
1272
*/
1273
if ((bf->bf_state.bfs_txflags &
1274
(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1275
/* XXX is this really needed? */
1276
bf->bf_state.bfs_ctsrate = 0;
1277
bf->bf_state.bfs_ctsduration = 0;
1278
return;
1279
}
1280
1281
/*
1282
* If protection is enabled, use the protection rix control
1283
* rate. Otherwise use the rate0 control rate.
1284
*/
1285
if (bf->bf_state.bfs_doprot)
1286
rix = sc->sc_protrix;
1287
else
1288
rix = bf->bf_state.bfs_rc[0].rix;
1289
1290
/*
1291
* If the raw path has hard-coded ctsrate0 to something,
1292
* use it.
1293
*/
1294
if (bf->bf_state.bfs_ctsrate0 != 0)
1295
cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1296
else
1297
/* Control rate from above */
1298
cix = rt->info[rix].controlRate;
1299
1300
/* Calculate the rtscts rate for the given cix */
1301
ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1302
bf->bf_state.bfs_shpream);
1303
1304
/* The 11n chipsets do ctsduration calculations for you */
1305
if (! ath_tx_is_11n(sc))
1306
ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1307
bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1308
rt, bf->bf_state.bfs_txflags);
1309
1310
/* Squirrel away in ath_buf */
1311
bf->bf_state.bfs_ctsrate = ctsrate;
1312
bf->bf_state.bfs_ctsduration = ctsduration;
1313
1314
/*
1315
* Must disable multi-rate retry when using RTS/CTS.
1316
*/
1317
if (!sc->sc_mrrprot) {
1318
bf->bf_state.bfs_ismrr = 0;
1319
bf->bf_state.bfs_try0 =
1320
bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1321
}
1322
}
1323
1324
/*
1325
* Setup the descriptor chain for a normal or fast-frame
1326
* frame.
1327
*
1328
* XXX TODO: extend to include the destination hardware QCU ID.
1329
* Make sure that is correct. Make sure that when being added
1330
* to the mcastq, the CABQ QCUID is set or things will get a bit
1331
* odd.
1332
*/
1333
static void
1334
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1335
{
1336
struct ath_desc *ds = bf->bf_desc;
1337
struct ath_hal *ah = sc->sc_ah;
1338
1339
if (bf->bf_state.bfs_txrate0 == 0)
1340
DPRINTF(sc, ATH_DEBUG_XMIT,
1341
"%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1342
1343
ath_hal_setuptxdesc(ah, ds
1344
, bf->bf_state.bfs_pktlen /* packet length */
1345
, bf->bf_state.bfs_hdrlen /* header length */
1346
, bf->bf_state.bfs_atype /* Atheros packet type */
1347
, bf->bf_state.bfs_txpower /* txpower */
1348
, bf->bf_state.bfs_txrate0
1349
, bf->bf_state.bfs_try0 /* series 0 rate/tries */
1350
, bf->bf_state.bfs_keyix /* key cache index */
1351
, bf->bf_state.bfs_txantenna /* antenna mode */
1352
, bf->bf_state.bfs_txflags /* flags */
1353
, bf->bf_state.bfs_ctsrate /* rts/cts rate */
1354
, bf->bf_state.bfs_ctsduration /* rts/cts duration */
1355
);
1356
1357
/*
1358
* This will be overridden when the descriptor chain is written.
1359
*/
1360
bf->bf_lastds = ds;
1361
bf->bf_last = bf;
1362
1363
/* Set rate control and descriptor chain for this frame */
1364
ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1365
ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1366
}
1367
1368
/*
1369
* Do a rate lookup.
1370
*
1371
* This performs a rate lookup for the given ath_buf only if it's required.
1372
* Non-data frames and raw frames don't require it.
1373
*
1374
* This populates the primary and MRR entries; MRR values are
1375
* then disabled later on if something requires it (eg RTS/CTS on
1376
* pre-11n chipsets.
1377
*
1378
* This needs to be done before the RTS/CTS fields are calculated
1379
* as they may depend upon the rate chosen.
1380
*/
1381
static void
1382
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,
1383
int pktlen, int is_aggr)
1384
{
1385
uint8_t rate, rix;
1386
int try0;
1387
int maxdur; // Note: Unused for now
1388
int maxpktlen;
1389
1390
if (! bf->bf_state.bfs_doratelookup)
1391
return;
1392
1393
/* Get rid of any previous state */
1394
bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1395
1396
ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1397
ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1398
pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen);
1399
1400
/* In case MRR is disabled, make sure rc[0] is setup correctly */
1401
bf->bf_state.bfs_rc[0].rix = rix;
1402
bf->bf_state.bfs_rc[0].ratecode = rate;
1403
bf->bf_state.bfs_rc[0].tries = try0;
1404
1405
if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1406
ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1407
is_aggr, bf->bf_state.bfs_rc);
1408
ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1409
1410
sc->sc_txrix = rix; /* for LED blinking */
1411
sc->sc_lastdatarix = rix; /* for fast frames */
1412
bf->bf_state.bfs_try0 = try0;
1413
bf->bf_state.bfs_txrate0 = rate;
1414
bf->bf_state.bfs_rc_maxpktlen = maxpktlen;
1415
}
1416
1417
/*
1418
* Update the CLRDMASK bit in the ath_buf if it needs to be set.
1419
*/
1420
static void
1421
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1422
struct ath_buf *bf)
1423
{
1424
struct ath_node *an = ATH_NODE(bf->bf_node);
1425
1426
ATH_TX_LOCK_ASSERT(sc);
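/*
* an->clrdmask behaves as a one-shot latch: it is armed elsewhere in
* the driver (e.g. when a paused or filtered node/TID is resumed) and
* consumed here by the next frame dispatched, so HAL_TXDESC_CLRDMASK
* is only set on that single frame.
*/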
1427
1428
if (an->clrdmask == 1) {
1429
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1430
an->clrdmask = 0;
1431
}
1432
}
1433
1434
/*
1435
* Return whether this frame should be software queued or
1436
* direct dispatched.
1437
*
1438
* When doing powersave, BAR frames should be queued but other management
1439
* frames should be directly sent.
1440
*
1441
* When not doing powersave, stick BAR frames into the hardware queue
1442
* so it goes out even though the queue is paused.
1443
*
1444
* For now, management frames are also software queued by default.
1445
*/
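/*
* Summary of the decision below:
*
*	frame type		node awake	node asleep
*	BAR			direct		swq (to head)
*	other mgmt/ctrl		swq		direct
*	anything else		swq		swq
*/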
1446
static int
1447
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1448
struct mbuf *m0, int *queue_to_head)
1449
{
1450
struct ieee80211_node *ni = &an->an_node;
1451
struct ieee80211_frame *wh;
1452
uint8_t type, subtype;
1453
1454
wh = mtod(m0, struct ieee80211_frame *);
1455
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1456
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1457
1458
(*queue_to_head) = 0;
1459
1460
/* If it's not in powersave - direct-dispatch BAR */
1461
if ((ATH_NODE(ni)->an_is_powersave == 0)
1462
&& type == IEEE80211_FC0_TYPE_CTL &&
1463
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1464
DPRINTF(sc, ATH_DEBUG_SW_TX,
1465
"%s: BAR: TX'ing direct\n", __func__);
1466
return (0);
1467
} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1468
&& type == IEEE80211_FC0_TYPE_CTL &&
1469
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1470
/* BAR TX whilst asleep; queue */
1471
DPRINTF(sc, ATH_DEBUG_SW_TX,
1472
"%s: swq: TX'ing\n", __func__);
1473
(*queue_to_head) = 1;
1474
return (1);
1475
} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1476
&& (type == IEEE80211_FC0_TYPE_MGT ||
1477
type == IEEE80211_FC0_TYPE_CTL)) {
1478
/*
1479
* Other control/mgmt frame; bypass software queuing
1480
* for now!
1481
*/
1482
DPRINTF(sc, ATH_DEBUG_XMIT,
1483
"%s: %6D: Node is asleep; sending mgmt "
1484
"(type=%d, subtype=%d)\n",
1485
__func__, ni->ni_macaddr, ":", type, subtype);
1486
return (0);
1487
} else {
1488
return (1);
1489
}
1490
}
1491
1492
/*
1493
* Transmit the given frame to the hardware.
1494
*
1495
* The frame must already be setup; rate control must already have
1496
* been done.
1497
*
1498
* XXX since the TXQ lock is being held here (and I dislike holding
1499
* it for this long when not doing software aggregation), later on
1500
* break this function into "setup_normal" and "xmit_normal". The
1501
* lock only needs to be held for the ath_tx_handoff call.
1502
*
1503
* XXX we don't update the leak count here - if we're doing
1504
* direct frame dispatch, we need to be able to do it without
1505
* decrementing the leak count (eg multicast queue frames.)
1506
*/
1507
static void
1508
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1509
struct ath_buf *bf)
1510
{
1511
struct ath_node *an = ATH_NODE(bf->bf_node);
1512
struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1513
1514
ATH_TX_LOCK_ASSERT(sc);
1515
1516
/*
1517
* For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1518
* set a completion handler however it doesn't (yet) properly
1519
* handle the strict ordering requirements needed for normal,
1520
* non-aggregate session frames.
1521
*
1522
* Once this is implemented, only set CLRDMASK like this for
1523
* frames that must go out - eg management/raw frames.
1524
*/
1525
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1526
1527
/* Setup the descriptor before handoff */
1528
ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1529
ath_tx_calc_duration(sc, bf);
1530
ath_tx_calc_protection(sc, bf);
1531
ath_tx_set_rtscts(sc, bf);
1532
ath_tx_rate_fill_rcflags(sc, bf);
1533
ath_tx_setds(sc, bf);
1534
1535
/* Track per-TID hardware queue depth correctly */
1536
tid->hwq_depth++;
1537
1538
/* Assign the completion handler */
1539
bf->bf_comp = ath_tx_normal_comp;
1540
1541
/* Hand off to hardware */
1542
ath_tx_handoff(sc, txq, bf);
1543
}
1544
1545
/*
1546
* Do the basic frame setup stuff that's required before the frame
1547
* is added to a software queue.
1548
*
1549
* All frames get mostly the same treatment and it's done once.
1550
* Retransmits fiddle with things like the rate control setup,
1551
* setting the retransmit bit in the packet; doing relevant DMA/bus
1552
* syncing and relinking it (back) into the hardware TX queue.
1553
*
1554
* Note that this may cause the mbuf to be reallocated, so
1555
* m0 may not be valid.
1556
*
1557
* If there's a problem then the mbuf is freed and an error
1558
* is returned. The ath_buf then needs to be freed by the
1559
* caller.
1560
*/
1561
static int
1562
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1563
struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1564
{
1565
struct ieee80211vap *vap = ni->ni_vap;
1566
struct ieee80211com *ic = &sc->sc_ic;
1567
int error, iswep, ismcast, isfrag, ismrr;
1568
int keyix, hdrlen, pktlen, try0 = 0;
1569
u_int8_t rix = 0, txrate = 0;
1570
struct ath_desc *ds;
1571
struct ieee80211_frame *wh;
1572
u_int subtype, flags;
1573
HAL_PKT_TYPE atype;
1574
const HAL_RATE_TABLE *rt;
1575
HAL_BOOL shortPreamble;
1576
struct ath_node *an;
1577
1578
/* XXX TODO: this pri is only used for non-QoS check, right? */
1579
u_int pri;
1580
1581
/*
1582
* To ensure that both sequence numbers and the CCMP PN handling
1583
* is "correct", make sure that the relevant TID queue is locked.
1584
* Otherwise the CCMP PN and seqno may appear out of order, causing
1585
* re-ordered frames to have out of order CCMP PN's, resulting
1586
* in many, many frame drops.
1587
*/
1588
ATH_TX_LOCK_ASSERT(sc);
1589
1590
wh = mtod(m0, struct ieee80211_frame *);
1591
iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1592
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1593
isfrag = m0->m_flags & M_FRAG;
1594
hdrlen = ieee80211_anyhdrsize(wh);
1595
/*
1596
* Packet length must not include any
1597
* pad bytes; deduct them here.
1598
*/
1599
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
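/*
* ((hdrlen & 3) is the size of any padding used to 4-byte align the
* payload after the 802.11 header - e.g. 2 bytes for a 26-byte QoS
* data header - which must not be counted in the over-the-air length.)
*/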
1600
1601
/* seqno allocate, only if AMPDU isn't running */
1602
if ((m0->m_flags & M_AMPDU_MPDU) == 0)
1603
ieee80211_output_seqno_assign(ni, -1, m0);
1604
1605
/* Handle encryption twiddling if needed */
1606
if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1607
&pktlen, &keyix)) {
1608
ieee80211_free_mbuf(m0);
1609
return EIO;
1610
}
1611
1612
/* packet header may have moved, reset our local pointer */
1613
wh = mtod(m0, struct ieee80211_frame *);
1614
1615
pktlen += IEEE80211_CRC_LEN;
1616
1617
/*
1618
* Load the DMA map so any coalescing is done. This
1619
* also calculates the number of descriptors we need.
1620
*/
1621
error = ath_tx_dmasetup(sc, bf, m0);
1622
if (error != 0)
1623
return error;
1624
KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1625
bf->bf_node = ni; /* NB: held reference */
1626
m0 = bf->bf_m; /* NB: may have changed */
1627
wh = mtod(m0, struct ieee80211_frame *);
1628
1629
/* setup descriptors */
1630
ds = bf->bf_desc;
1631
rt = sc->sc_currates;
1632
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1633
1634
/*
1635
* NB: the 802.11 layer marks whether or not we should
1636
* use short preamble based on the current mode and
1637
* negotiated parameters.
1638
*/
1639
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1640
(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1641
shortPreamble = AH_TRUE;
1642
sc->sc_stats.ast_tx_shortpre++;
1643
} else {
1644
shortPreamble = AH_FALSE;
1645
}
1646
1647
an = ATH_NODE(ni);
1648
//flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
1649
flags = 0;
1650
ismrr = 0; /* default no multi-rate retry*/
1651
1652
pri = ath_tx_getac(sc, m0); /* honor classification */
1653
/* XXX use txparams instead of fixed values */
1654
/*
1655
* Calculate Atheros packet type from IEEE80211 packet header,
1656
* setup for rate calculations, and select h/w transmit queue.
1657
*/
1658
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1659
case IEEE80211_FC0_TYPE_MGT:
1660
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1661
if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1662
atype = HAL_PKT_TYPE_BEACON;
1663
else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1664
atype = HAL_PKT_TYPE_PROBE_RESP;
1665
else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1666
atype = HAL_PKT_TYPE_ATIM;
1667
else
1668
atype = HAL_PKT_TYPE_NORMAL; /* XXX */
1669
rix = an->an_mgmtrix;
1670
txrate = rt->info[rix].rateCode;
1671
if (shortPreamble)
1672
txrate |= rt->info[rix].shortPreamble;
1673
try0 = ATH_TXMGTTRY;
1674
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1675
break;
1676
case IEEE80211_FC0_TYPE_CTL:
1677
atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
1678
rix = an->an_mgmtrix;
1679
txrate = rt->info[rix].rateCode;
1680
if (shortPreamble)
1681
txrate |= rt->info[rix].shortPreamble;
1682
try0 = ATH_TXMGTTRY;
1683
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1684
break;
1685
case IEEE80211_FC0_TYPE_DATA:
1686
atype = HAL_PKT_TYPE_NORMAL; /* default */
1687
/*
1688
* Data frames: multicast frames go out at a fixed rate,
1689
* EAPOL frames use the mgmt frame rate; otherwise consult
1690
* the rate control module for the rate to use.
1691
*/
1692
if (ismcast) {
1693
rix = an->an_mcastrix;
1694
txrate = rt->info[rix].rateCode;
1695
if (shortPreamble)
1696
txrate |= rt->info[rix].shortPreamble;
1697
try0 = 1;
1698
} else if (m0->m_flags & M_EAPOL) {
1699
/* XXX? maybe always use long preamble? */
1700
rix = an->an_mgmtrix;
1701
txrate = rt->info[rix].rateCode;
1702
if (shortPreamble)
1703
txrate |= rt->info[rix].shortPreamble;
1704
try0 = ATH_TXMAXTRY; /* XXX?too many? */
1705
} else {
1706
/*
1707
* Do rate lookup on each TX, rather than using
1708
* the hard-coded TX information decided here.
1709
*/
1710
ismrr = 1;
1711
bf->bf_state.bfs_doratelookup = 1;
1712
}
1713
1714
/*
1715
* Check whether to set NOACK for this WME category or not.
1716
*/
1717
if (ieee80211_wme_vap_ac_is_noack(vap, pri))
1718
flags |= HAL_TXDESC_NOACK;
1719
break;
1720
default:
1721
device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1722
wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1723
/* XXX statistic */
1724
/* XXX free tx dmamap */
1725
ieee80211_free_mbuf(m0);
1726
return EIO;
1727
}
1728
1729
/*
1730
* There are two known scenarios where the frame AC doesn't match
1731
* what the destination TXQ is.
1732
*
1733
* + non-QoS frames (eg management?) that the net80211 stack has
1734
* assigned a higher AC to, but since it's a non-QoS TID, it's
1735
* being thrown into TID 16. TID 16 gets the AC_BE queue.
1736
* It's quite possible that management frames should just be
1737
* direct dispatched to hardware rather than go via the software
1738
* queue; that should be investigated in the future. There are
1739
* some specific scenarios where this doesn't make sense, mostly
1740
* surrounding ADDBA request/response - hence why that is special
1741
* cased.
1742
*
1743
* + Multicast frames going into the VAP mcast queue. That shows up
1744
* as "TXQ 11".
1745
*
1746
* This driver should eventually support separate TID and TXQ locking,
1747
* allowing for arbitrary AC frames to appear on arbitrary software
1748
* queues, being queued to the "correct" hardware queue when needed.
1749
*/
1750
#if 0
1751
if (txq != sc->sc_ac2q[pri]) {
1752
DPRINTF(sc, ATH_DEBUG_XMIT,
1753
"%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1754
__func__,
1755
txq,
1756
txq->axq_qnum,
1757
pri,
1758
sc->sc_ac2q[pri],
1759
sc->sc_ac2q[pri]->axq_qnum);
1760
}
1761
#endif
1762
1763
/*
1764
* Calculate miscellaneous flags.
1765
*/
1766
if (ismcast) {
1767
flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
1768
} else if (pktlen > vap->iv_rtsthreshold &&
1769
(ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1770
flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
1771
sc->sc_stats.ast_tx_rts++;
1772
}
1773
if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
1774
sc->sc_stats.ast_tx_noack++;
1775
#ifdef IEEE80211_SUPPORT_TDMA
1776
if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1777
DPRINTF(sc, ATH_DEBUG_TDMA,
1778
"%s: discard frame, ACK required w/ TDMA\n", __func__);
1779
sc->sc_stats.ast_tdma_ack++;
1780
/* XXX free tx dmamap */
1781
ieee80211_free_mbuf(m0);
1782
return EIO;
1783
}
1784
#endif
1785
1786
/*
1787
* If it's a frame to do location reporting on,
1788
* communicate it to the HAL.
1789
*/
1790
if (ieee80211_get_toa_params(m0, NULL)) {
1791
device_printf(sc->sc_dev,
1792
"%s: setting TX positioning bit\n", __func__);
1793
flags |= HAL_TXDESC_POS;
1794
1795
/*
1796
* Note: The hardware reports timestamps for
1797
* each of the RX'ed packets as part of the packet
1798
* exchange. So this means things like RTS/CTS
1799
* exchanges, as well as the final ACK.
1800
*
1801
* So, if you send a RTS-protected NULL data frame,
1802
* you'll get an RX report for the RTS response, then
1803
* an RX report for the NULL frame, and then the TX
1804
* completion at the end.
1805
*
1806
* NOTE: it doesn't work right for CCK frames;
1807
* there's no channel info data provided unless
1808
* it's OFDM or HT. Will have to dig into it.
1809
*/
1810
flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
1811
bf->bf_flags |= ATH_BUF_TOA_PROBE;
1812
}
1813
1814
#if 0
1815
/*
1816
* Placeholder: if you want to transmit with the azimuth
1817
* timestamp in the end of the payload, here's where you
1818
* should set the TXDESC field.
1819
*/
1820
flags |= HAL_TXDESC_HWTS;
1821
#endif
1822
1823
/*
* Determine if a tx interrupt should be generated for
* this descriptor. We take a tx interrupt to reap
* descriptors when the h/w hits an EOL condition or
* when the descriptor is specifically marked to generate
* an interrupt. We periodically mark descriptors in this
* way to ensure timely replenishing of the supply needed
* for sending frames. Deferring interrupts reduces system
* load and potentially allows more concurrent work to be
* done, but if done too aggressively it can cause senders to
* back up.
*
* NB: use >= to deal with sc_txintrperiod changing
* dynamically through sysctl.
*/
1838
if (flags & HAL_TXDESC_INTREQ) {
1839
txq->axq_intrcnt = 0;
1840
} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1841
flags |= HAL_TXDESC_INTREQ;
1842
txq->axq_intrcnt = 0;
1843
}
1844
1845
/* This point forward is actual TX bits */
1846
1847
/*
1848
* At this point we are committed to sending the frame
1849
* and we don't need to look at m_nextpkt; clear it in
1850
* case this frame is part of frag chain.
1851
*/
1852
m0->m_nextpkt = NULL;
1853
1854
if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1855
ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1856
sc->sc_hwmap[rix].ieeerate, -1);
1857
1858
if (ieee80211_radiotap_active_vap(vap)) {
1859
sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1860
if (iswep)
1861
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1862
if (isfrag)
1863
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1864
sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1865
sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1866
sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1867
1868
ieee80211_radiotap_tx(vap, m0);
1869
}
1870
1871
/* Blank the legacy rate array */
1872
bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1873
1874
/*
1875
* ath_buf_set_rate needs at least one rate/try to setup
1876
* the rate scenario.
1877
*/
1878
bf->bf_state.bfs_rc[0].rix = rix;
1879
bf->bf_state.bfs_rc[0].tries = try0;
1880
bf->bf_state.bfs_rc[0].ratecode = txrate;
1881
1882
/* Store the decided rate index values away */
1883
bf->bf_state.bfs_pktlen = pktlen;
1884
bf->bf_state.bfs_hdrlen = hdrlen;
1885
bf->bf_state.bfs_atype = atype;
1886
bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1887
bf->bf_state.bfs_txrate0 = txrate;
1888
bf->bf_state.bfs_try0 = try0;
1889
bf->bf_state.bfs_keyix = keyix;
1890
bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1891
bf->bf_state.bfs_txflags = flags;
1892
bf->bf_state.bfs_shpream = shortPreamble;
1893
1894
/* XXX this should be done in ath_tx_setrate() */
1895
bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1896
bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1897
bf->bf_state.bfs_ctsduration = 0;
1898
bf->bf_state.bfs_ismrr = ismrr;
1899
1900
return 0;
1901
}
1902
1903
/*
* Queue a frame to the hardware or software queue.
*
* This can be called by the net80211 code.
*
* XXX what about locking? Or, push the seqno assign into the
* XXX aggregate scheduler so it's serialised?
*
* XXX When sending management frames via ath_raw_xmit(),
* should CLRDMASK be set unconditionally?
*/
1914
int
1915
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1916
struct ath_buf *bf, struct mbuf *m0)
1917
{
1918
struct ieee80211vap *vap = ni->ni_vap;
1919
struct ath_vap *avp = ATH_VAP(vap);
1920
int r = 0;
1921
u_int pri;
1922
int tid;
1923
struct ath_txq *txq;
1924
int ismcast;
1925
const struct ieee80211_frame *wh;
1926
int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1927
ieee80211_seq seqno;
1928
uint8_t type, subtype;
1929
int queue_to_head;
1930
1931
ATH_TX_LOCK_ASSERT(sc);
1932
1933
/*
1934
* Determine the target hardware queue.
1935
*
1936
* For multicast frames, the txq gets overridden appropriately
1937
* depending upon the state of PS. If powersave is enabled
1938
* then they get added to the cabq for later transmit.
1939
*
1940
* The "fun" issue here is that group addressed frames should
1941
* have the sequence number from a different pool, rather than
1942
* the per-TID pool. That means that even QoS group addressed
1943
* frames will have a sequence number from that global value,
1944
* which means if we transmit different group addressed frames
1945
* at different traffic priorities, the sequence numbers will
1946
* all be out of whack. So - chances are, the right thing
1947
* to do here is to always put group addressed frames into the BE
1948
* queue, and ignore the TID for queue selection.
1949
*
1950
* For any other frame, we do a TID/QoS lookup inside the frame
1951
* to see what the TID should be. If it's a non-QoS frame, the
1952
* AC and TID are overridden. The TID/TXQ code assumes the
1953
* TID is on a predictable hardware TXQ, so we don't support
1954
* having a node TID queued to multiple hardware TXQs.
1955
* This may change in the future but would require some locking
1956
* fudgery.
1957
*/
1958
pri = ath_tx_getac(sc, m0);
1959
tid = ath_tx_gettid(sc, m0);
1960
1961
txq = sc->sc_ac2q[pri];
1962
wh = mtod(m0, struct ieee80211_frame *);
1963
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1964
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1965
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1966
1967
/*
1968
* Enforce how deep the multicast queue can grow.
1969
*
1970
* XXX duplicated in ath_raw_xmit().
1971
*/
1972
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1973
if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1974
> sc->sc_txq_mcastq_maxdepth) {
1975
sc->sc_stats.ast_tx_mcastq_overflow++;
1976
m_freem(m0);
1977
return (ENOBUFS);
1978
}
1979
}
1980
1981
/*
1982
* Enforce how deep the unicast queue can grow.
1983
*
1984
* If the node is in power save then we don't want
1985
* the software queue to grow too deep, or a node may
1986
* end up consuming all of the ath_buf entries.
1987
*
1988
* For now, only do this for DATA frames.
1989
*
1990
* We will want to cap how many management/control
1991
* frames get punted to the software queue so it doesn't
1992
* fill up. But the correct solution isn't yet obvious.
1993
* In any case, this check should at least let frames pass
1994
* that we are direct-dispatching.
1995
*
1996
* XXX TODO: duplicate this to the raw xmit path!
1997
*/
1998
if (type == IEEE80211_FC0_TYPE_DATA &&
1999
ATH_NODE(ni)->an_is_powersave &&
2000
ATH_NODE(ni)->an_swq_depth >
2001
sc->sc_txq_node_psq_maxdepth) {
2002
sc->sc_stats.ast_tx_node_psq_overflow++;
2003
m_freem(m0);
2004
return (ENOBUFS);
2005
}
2006
2007
/* A-MPDU TX */
2008
is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
2009
is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
2010
is_ampdu = is_ampdu_tx | is_ampdu_pending;
2011
2012
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
2013
__func__, tid, pri, is_ampdu);
2014
2015
/* Set local packet state, used to queue packets to hardware */
2016
bf->bf_state.bfs_tid = tid;
2017
bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2018
bf->bf_state.bfs_pri = pri;
2019
2020
#if 1
2021
/*
* When servicing one or more stations in power-save mode,
* or if there is some mcast data waiting on the mcast queue
* (to prevent out-of-order delivery), multicast frames
* must be buffered until after the beacon.
*
* TODO: we should lock the mcastq before we check the length.
*/
2029
if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2030
txq = &avp->av_mcastq;
2031
/*
2032
* Mark the frame as eventually belonging on the CAB
2033
* queue, so the descriptor setup functions will
2034
* correctly initialise the descriptor 'qcuId' field.
2035
*/
2036
bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2037
}
2038
#endif
2039
2040
/* Do the generic frame setup */
2041
/* XXX should just bzero the bf_state? */
2042
bf->bf_state.bfs_dobaw = 0;
2043
2044
/* A-MPDU TX? Manually set sequence number */
2045
/*
2046
* Don't do it whilst pending; the net80211 layer still
2047
* assigns them.
2048
*
2049
* Don't assign A-MPDU sequence numbers to group address
2050
* frames; they come from a different sequence number space.
2051
*/
2052
if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {
2053
/*
2054
* Always call; this function will
2055
* handle making sure that null data frames
2056
* and group-addressed frames don't get a sequence number
2057
* from the current TID and thus mess with the BAW.
2058
*/
2059
seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2060
2061
/*
2062
* Don't add QoS NULL frames and group-addressed frames
2063
* to the BAW.
2064
*/
2065
if (IEEE80211_QOS_HAS_SEQ(wh) &&
2066
(! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
2067
(! IEEE80211_IS_QOS_NULL(wh))) {
2068
bf->bf_state.bfs_dobaw = 1;
2069
}
2070
}
2071
2072
/*
2073
* If needed, the sequence number has been assigned.
2074
* Squirrel it away somewhere easy to get to.
2075
*/
2076
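/*
* Note: bfs_seqno is kept pre-shifted into sequence-control
* format (seqno << IEEE80211_SEQ_SEQ_SHIFT); later consumers
* use SEQNO() to shift it back down before comparing against
* the BAW.
*/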
bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2077
2078
/* Is ampdu pending? fetch the seqno and print it out */
2079
if (is_ampdu_pending)
2080
DPRINTF(sc, ATH_DEBUG_SW_TX,
2081
"%s: tid %d: ampdu pending, seqno %d\n",
2082
__func__, tid, M_SEQNO_GET(m0));
2083
2084
/* This also sets up the DMA map; crypto; frame parameters, etc */
2085
r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2086
if (r != 0)
2087
return (r);
2088
2089
/* At this point m0 could have changed! */
2090
m0 = bf->bf_m;
2091
2092
#if 1
2093
/*
2094
* If it's a multicast frame, do a direct-dispatch to the
2095
* destination hardware queue. Don't bother software
2096
* queuing it.
2097
*/
2098
/*
2099
* If it's a BAR frame, do a direct dispatch to the
2100
* destination hardware queue. Don't bother software
2101
* queuing it, as the TID will now be paused.
2102
* Sending a BAR frame can occur from the net80211 txa timer
2103
* (ie, retries) or from the ath txtask (completion call.)
2104
* It queues directly to hardware because the TID is paused
2105
* at this point (and won't be unpaused until the BAR has
2106
* either been TXed successfully or max retries has been
2107
* reached.)
2108
*/
2109
/*
2110
* Until things are better debugged - if this node is asleep
2111
* and we're sending it a non-BAR frame, direct dispatch it.
2112
* Why? Because we need to figure out what's actually being
2113
* sent - eg, during reassociation/reauthentication after
2114
* the node (last) disappeared whilst asleep, the driver should
2115
* have unpaused/unsleep'ed the node. So until that is
2116
* sorted out, use this workaround.
2117
*/
2118
if (txq == &avp->av_mcastq) {
2119
DPRINTF(sc, ATH_DEBUG_SW_TX,
2120
"%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2121
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2122
ath_tx_xmit_normal(sc, txq, bf);
2123
} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2124
&queue_to_head)) {
2125
ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2126
} else {
2127
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2128
ath_tx_xmit_normal(sc, txq, bf);
2129
}
2130
#else
2131
/*
2132
* For now, since there's no software queue,
2133
* direct-dispatch to the hardware.
2134
*/
2135
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2136
/*
2137
* Update the current leak count if
2138
* we're leaking frames; and set the
2139
* MORE flag as appropriate.
2140
*/
2141
ath_tx_leak_count_update(sc, tid, bf);
2142
ath_tx_xmit_normal(sc, txq, bf);
2143
#endif
2144
return 0;
2145
}
2146
2147
static int
2148
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2149
struct ath_buf *bf, struct mbuf *m0,
2150
const struct ieee80211_bpf_params *params)
2151
{
2152
struct ieee80211com *ic = &sc->sc_ic;
2153
struct ieee80211vap *vap = ni->ni_vap;
2154
int error, ismcast, ismrr;
2155
int keyix, hdrlen, pktlen, try0, txantenna;
2156
u_int8_t rix, txrate;
2157
struct ieee80211_frame *wh;
2158
u_int flags;
2159
HAL_PKT_TYPE atype;
2160
const HAL_RATE_TABLE *rt;
2161
struct ath_desc *ds;
2162
u_int pri;
2163
int o_tid = -1;
2164
int do_override;
2165
uint8_t type, subtype;
2166
int queue_to_head;
2167
struct ath_node *an = ATH_NODE(ni);
2168
2169
ATH_TX_LOCK_ASSERT(sc);
2170
2171
wh = mtod(m0, struct ieee80211_frame *);
2172
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2173
hdrlen = ieee80211_anyhdrsize(wh);
2174
/*
2175
* Packet length must not include any
2176
* pad bytes; deduct them here.
2177
*/
2178
/* XXX honor IEEE80211_BPF_DATAPAD */
2179
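/*
* The mbuf length includes the pad inserted to 4-byte align the
* payload after the 802.11 header; for the (even) header sizes in
* use here, (hdrlen & 3) is that pad. IEEE80211_CRC_LEN accounts
* for the FCS appended by the hardware.
*/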
pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2180
2181
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2182
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2183
2184
ATH_KTR(sc, ATH_KTR_TX, 2,
2185
"ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2186
2187
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2188
__func__, ismcast);
2189
2190
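/*
* ibp_pri packs two fields: the low two bits select the WME
* access category and the remaining bits select the TX antenna
* (extracted as ibp_pri >> 2 further down).
*/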
pri = params->ibp_pri & 3;
2191
/* Override pri if the frame isn't a QoS one */
2192
if (! IEEE80211_QOS_HAS_SEQ(wh))
2193
pri = ath_tx_getac(sc, m0);
2194
2195
/* XXX If it's an ADDBA, override the correct queue */
2196
do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2197
2198
/* Map ADDBA to the correct priority */
2199
if (do_override) {
2200
#if 1
2201
DPRINTF(sc, ATH_DEBUG_XMIT,
2202
"%s: overriding tid %d pri %d -> %d\n",
2203
__func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2204
#endif
2205
pri = TID_TO_WME_AC(o_tid);
2206
}
2207
2208
/*
2209
* "pri" is the hardware queue to transmit on.
2210
*
2211
* Look at the description in ath_tx_start() to understand
2212
* what needs to be "fixed" here so we just use the TID
2213
* for QoS frames.
2214
*/
2215
2216
/* seqno allocate, only if AMPDU isn't running */
2217
if ((m0->m_flags & M_AMPDU_MPDU) == 0)
2218
ieee80211_output_seqno_assign(ni, -1, m0);
2219
2220
/* Handle encryption twiddling if needed */
2221
if (! ath_tx_tag_crypto(sc, ni,
2222
m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2223
&hdrlen, &pktlen, &keyix)) {
2224
ieee80211_free_mbuf(m0);
2225
return EIO;
2226
}
2227
/* packet header may have moved, reset our local pointer */
2228
wh = mtod(m0, struct ieee80211_frame *);
2229
2230
/* Do the generic frame setup */
2231
/* XXX should just bzero the bf_state? */
2232
bf->bf_state.bfs_dobaw = 0;
2233
2234
error = ath_tx_dmasetup(sc, bf, m0);
2235
if (error != 0)
2236
return error;
2237
m0 = bf->bf_m; /* NB: may have changed */
2238
wh = mtod(m0, struct ieee80211_frame *);
2239
KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2240
bf->bf_node = ni; /* NB: held reference */
2241
2242
/* Always enable CLRDMASK for raw frames for now.. */
2243
flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
2244
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
2245
if (params->ibp_flags & IEEE80211_BPF_RTS)
2246
flags |= HAL_TXDESC_RTSENA;
2247
else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2248
/* XXX assume 11g/11n protection? */
2249
bf->bf_state.bfs_doprot = 1;
2250
flags |= HAL_TXDESC_CTSENA;
2251
}
2252
/* XXX leave ismcast to injector? */
2253
if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2254
flags |= HAL_TXDESC_NOACK;
2255
2256
rt = sc->sc_currates;
2257
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2258
2259
/* Fetch first rate information */
2260
rix = ath_tx_findrix(sc, params->ibp_rate0);
2261
try0 = params->ibp_try0;
2262
2263
/*
2264
* Override EAPOL rate as appropriate.
2265
*/
2266
if (m0->m_flags & M_EAPOL) {
2267
/* XXX? maybe always use long preamble? */
2268
rix = an->an_mgmtrix;
2269
try0 = ATH_TXMAXTRY; /* XXX?too many? */
2270
}
2271
2272
/*
2273
* If it's a frame to do location reporting on,
2274
* communicate it to the HAL.
2275
*/
2276
if (ieee80211_get_toa_params(m0, NULL)) {
2277
device_printf(sc->sc_dev,
2278
"%s: setting TX positioning bit\n", __func__);
2279
flags |= HAL_TXDESC_POS;
2280
flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
2281
bf->bf_flags |= ATH_BUF_TOA_PROBE;
2282
}
2283
2284
txrate = rt->info[rix].rateCode;
2285
if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2286
txrate |= rt->info[rix].shortPreamble;
2287
sc->sc_txrix = rix;
2288
ismrr = (params->ibp_try1 != 0);
2289
txantenna = params->ibp_pri >> 2;
2290
if (txantenna == 0) /* XXX? */
2291
txantenna = sc->sc_txantenna;
2292
2293
/*
2294
* Since ctsrate is fixed, store it away for later
2295
* use when the descriptor fields are being set.
2296
*/
2297
if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2298
bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2299
2300
/*
2301
* NB: we mark all packets as type PSPOLL so the h/w won't
2302
* set the sequence number, duration, etc.
2303
*/
2304
atype = HAL_PKT_TYPE_PSPOLL;
2305
2306
if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2307
ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2308
sc->sc_hwmap[rix].ieeerate, -1);
2309
2310
if (ieee80211_radiotap_active_vap(vap)) {
2311
sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2312
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2313
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2314
if (m0->m_flags & M_FRAG)
2315
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2316
sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2317
sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2318
ieee80211_get_node_txpower(ni));
2319
sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2320
2321
ieee80211_radiotap_tx(vap, m0);
2322
}
2323
2324
/*
2325
* Formulate first tx descriptor with tx controls.
2326
*/
2327
ds = bf->bf_desc;
2328
/* XXX check return value? */
2329
2330
/* Store the decided rate index values away */
2331
bf->bf_state.bfs_pktlen = pktlen;
2332
bf->bf_state.bfs_hdrlen = hdrlen;
2333
bf->bf_state.bfs_atype = atype;
2334
bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2335
ieee80211_get_node_txpower(ni));
2336
bf->bf_state.bfs_txrate0 = txrate;
2337
bf->bf_state.bfs_try0 = try0;
2338
bf->bf_state.bfs_keyix = keyix;
2339
bf->bf_state.bfs_txantenna = txantenna;
2340
bf->bf_state.bfs_txflags = flags;
2341
bf->bf_state.bfs_shpream =
2342
!! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2343
2344
/* Set local packet state, used to queue packets to hardware */
2345
bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2346
bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2347
bf->bf_state.bfs_pri = pri;
2348
2349
/* XXX this should be done in ath_tx_setrate() */
2350
bf->bf_state.bfs_ctsrate = 0;
2351
bf->bf_state.bfs_ctsduration = 0;
2352
bf->bf_state.bfs_ismrr = ismrr;
2353
2354
/* Blank the legacy rate array */
2355
bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2356
2357
bf->bf_state.bfs_rc[0].rix = rix;
2358
bf->bf_state.bfs_rc[0].tries = try0;
2359
bf->bf_state.bfs_rc[0].ratecode = txrate;
2360
2361
if (ismrr) {
2362
int rix;
2363
2364
rix = ath_tx_findrix(sc, params->ibp_rate1);
2365
bf->bf_state.bfs_rc[1].rix = rix;
2366
bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2367
2368
rix = ath_tx_findrix(sc, params->ibp_rate2);
2369
bf->bf_state.bfs_rc[2].rix = rix;
2370
bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2371
2372
rix = ath_tx_findrix(sc, params->ibp_rate3);
2373
bf->bf_state.bfs_rc[3].rix = rix;
2374
bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2375
}
2376
/*
2377
* All the required rate control decisions have been made;
2378
* fill in the rc flags.
2379
*/
2380
ath_tx_rate_fill_rcflags(sc, bf);
2381
2382
/* NB: no buffered multicast in power save support */
2383
2384
/*
* If we're overriding the ADDBA destination, dump directly
* into the hardware queue, right after any pending
* frames to that node.
*/
2389
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2390
__func__, do_override);
2391
2392
#if 1
2393
/*
2394
* Put addba frames in the right place in the right TID/HWQ.
2395
*/
2396
if (do_override) {
2397
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2398
/*
2399
* XXX if it's addba frames, should we be leaking
2400
* them out via the frame leak method?
2401
* XXX for now let's not risk it; but we may wish
2402
* to investigate this later.
2403
*/
2404
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2405
} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2406
&queue_to_head)) {
2407
/* Queue to software queue */
2408
ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2409
} else {
2410
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2411
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2412
}
2413
#else
2414
/* Direct-dispatch to the hardware */
2415
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2416
/*
2417
* Update the current leak count if
2418
* we're leaking frames; and set the
2419
* MORE flag as appropriate.
2420
*/
2421
ath_tx_leak_count_update(sc, tid, bf);
2422
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2423
#endif
2424
return 0;
2425
}
2426
2427
/*
2428
* Send a raw frame.
2429
*
2430
* This can be called by net80211.
2431
*/
2432
int
2433
ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2434
const struct ieee80211_bpf_params *params)
2435
{
2436
struct ieee80211com *ic = ni->ni_ic;
2437
struct ath_softc *sc = ic->ic_softc;
2438
struct ath_buf *bf;
2439
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2440
int error = 0;
2441
2442
ATH_PCU_LOCK(sc);
2443
if (sc->sc_inreset_cnt > 0) {
2444
DPRINTF(sc, ATH_DEBUG_XMIT,
2445
"%s: sc_inreset_cnt > 0; bailing\n", __func__);
2446
error = EIO;
2447
ATH_PCU_UNLOCK(sc);
2448
goto badbad;
2449
}
2450
sc->sc_txstart_cnt++;
2451
ATH_PCU_UNLOCK(sc);
2452
2453
/* Wake the hardware up already */
2454
ATH_LOCK(sc);
2455
ath_power_set_power_state(sc, HAL_PM_AWAKE);
2456
ATH_UNLOCK(sc);
2457
2458
ATH_TX_LOCK(sc);
2459
2460
if (!sc->sc_running || sc->sc_invalid) {
2461
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2462
__func__, sc->sc_running, sc->sc_invalid);
2463
m_freem(m);
2464
error = ENETDOWN;
2465
goto bad;
2466
}
2467
2468
/*
2469
* Enforce how deep the multicast queue can grow.
2470
*
2471
* XXX duplicated in ath_tx_start().
2472
*/
2473
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2474
if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2475
> sc->sc_txq_mcastq_maxdepth) {
2476
sc->sc_stats.ast_tx_mcastq_overflow++;
2477
error = ENOBUFS;
2478
}
2479
2480
if (error != 0) {
2481
m_freem(m);
2482
goto bad;
2483
}
2484
}
2485
2486
/*
2487
* Grab a TX buffer and associated resources.
2488
*/
2489
bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2490
if (bf == NULL) {
2491
sc->sc_stats.ast_tx_nobuf++;
2492
m_freem(m);
2493
error = ENOBUFS;
2494
goto bad;
2495
}
2496
ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2497
m, params, bf);
2498
2499
if (params == NULL) {
2500
/*
2501
* Legacy path; interpret frame contents to decide
2502
* precisely how to send the frame.
2503
*/
2504
if (ath_tx_start(sc, ni, bf, m)) {
2505
error = EIO; /* XXX */
2506
goto bad2;
2507
}
2508
} else {
2509
/*
2510
* Caller supplied explicit parameters to use in
2511
* sending the frame.
2512
*/
2513
if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2514
error = EIO; /* XXX */
2515
goto bad2;
2516
}
2517
}
2518
sc->sc_wd_timer = 5;
2519
sc->sc_stats.ast_tx_raw++;
2520
2521
/*
2522
* Update the TIM - if there's anything queued to the
2523
* software queue and power save is enabled, we should
2524
* set the TIM.
2525
*/
2526
ath_tx_update_tim(sc, ni, 1);
2527
2528
ATH_TX_UNLOCK(sc);
2529
2530
ATH_PCU_LOCK(sc);
2531
sc->sc_txstart_cnt--;
2532
ATH_PCU_UNLOCK(sc);
2533
2534
/* Put the hardware back to sleep if required */
2535
ATH_LOCK(sc);
2536
ath_power_restore_power_state(sc);
2537
ATH_UNLOCK(sc);
2538
2539
return 0;
2540
2541
bad2:
2542
ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2543
"bf=%p",
2544
m,
2545
params,
2546
bf);
2547
ATH_TXBUF_LOCK(sc);
2548
ath_returnbuf_head(sc, bf);
2549
ATH_TXBUF_UNLOCK(sc);
2550
2551
bad:
2552
ATH_TX_UNLOCK(sc);
2553
2554
ATH_PCU_LOCK(sc);
2555
sc->sc_txstart_cnt--;
2556
ATH_PCU_UNLOCK(sc);
2557
2558
/* Put the hardware back to sleep if required */
2559
ATH_LOCK(sc);
2560
ath_power_restore_power_state(sc);
2561
ATH_UNLOCK(sc);
2562
2563
badbad:
2564
ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2565
m, params);
2566
sc->sc_stats.ast_tx_raw_fail++;
2567
2568
return error;
2569
}
2570
2571
/* Some helper functions */
2572
2573
/*
2574
* ADDBA (and potentially others) need to be placed in the same
2575
* hardware queue as the TID/node it's relating to. This is so
2576
* it goes out after any pending non-aggregate frames to the
2577
* same node/TID.
2578
*
2579
* If this isn't done, the ADDBA can go out before the frames
2580
* queued in hardware. Even though these frames have a sequence
2581
* number -earlier- than the ADDBA can be transmitted (but
2582
* no frames whose sequence numbers are after the ADDBA should
2583
* be!) they'll arrive after the ADDBA - and the receiving end
2584
* will simply drop them as being out of the BAW.
2585
*
2586
* The frames can't be appended to the TID software queue - it'll
2587
* never be sent out. So these frames have to be directly
2588
* dispatched to the hardware, rather than queued in software.
2589
* So if this function returns true, the TXQ has to be
2590
* overridden and it has to be directly dispatched.
2591
*
2592
* It's a dirty hack, but someone's gotta do it.
2593
*/
2594
2595
/*
2596
* Return an alternate TID for ADDBA request frames.
2597
*
2598
* Yes, this likely should be done in the net80211 layer.
2599
*/
2600
static int
2601
ath_tx_action_frame_override_queue(struct ath_softc *sc,
2602
struct ieee80211_node *ni,
2603
struct mbuf *m0, int *tid)
2604
{
2605
struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2606
struct ieee80211_action_ba_addbarequest *ia;
2607
uint8_t *frm;
2608
uint16_t baparamset;
2609
2610
/* Not action frame? Bail */
2611
if (! IEEE80211_IS_MGMT_ACTION(wh))
2612
return 0;
2613
2614
/* XXX Not needed for frames we send? */
2615
#if 0
2616
/* Correct length? */
2617
if (! ieee80211_parse_action(ni, m))
2618
return 0;
2619
#endif
2620
2621
/* Extract out action frame */
2622
frm = (u_int8_t *)&wh[1];
2623
ia = (struct ieee80211_action_ba_addbarequest *) frm;
2624
2625
/* Not ADDBA? Bail */
2626
if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2627
return 0;
2628
if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2629
return 0;
2630
2631
/* Extract TID, return it */
2632
baparamset = le16toh(ia->rq_baparamset);
2633
*tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID);
2634
2635
return 1;
2636
}
2637
2638
/* Per-node software queue operations */
2639
2640
/*
2641
* Add the current packet to the given BAW.
2642
* It is assumed that the current packet
2643
*
2644
* + fits inside the BAW;
2645
* + already has had a sequence number allocated.
2646
*
2647
* Since the BAW status may be modified by both the ath task and
2648
* the net80211/ifnet contexts, the TID must be locked.
2649
*/
2650
void
2651
ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2652
struct ath_tid *tid, struct ath_buf *bf)
2653
{
2654
int index, cindex;
2655
struct ieee80211_tx_ampdu *tap;
2656
2657
ATH_TX_LOCK_ASSERT(sc);
2658
2659
if (bf->bf_state.bfs_isretried)
2660
return;
2661
2662
tap = ath_tx_get_tx_tid(an, tid->tid);
2663
2664
if (! bf->bf_state.bfs_dobaw) {
2665
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2666
"%s: dobaw=0, seqno=%d, window %d:%d\n",
2667
__func__, SEQNO(bf->bf_state.bfs_seqno),
2668
tap->txa_start, tap->txa_wnd);
2669
}
2670
2671
if (bf->bf_state.bfs_addedbaw)
2672
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2673
"%s: re-added? tid=%d, seqno %d; window %d:%d; "
2674
"baw head=%d tail=%d\n",
2675
__func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2676
tap->txa_start, tap->txa_wnd, tid->baw_head,
2677
tid->baw_tail);
2678
2679
/*
2680
* Verify that the given sequence number is not outside of the
2681
* BAW. Complain loudly if that's the case.
2682
*/
2683
if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2684
SEQNO(bf->bf_state.bfs_seqno))) {
2685
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2686
"%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2687
"baw head=%d tail=%d\n",
2688
__func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2689
tap->txa_start, tap->txa_wnd, tid->baw_head,
2690
tid->baw_tail);
2691
}
2692
2693
/*
2694
* ni->ni_txseqs[] is the currently allocated seqno.
2695
* the txa state contains the current baw start.
2696
*/
2697
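/*
* 'index' is how far this seqno sits past the left edge of the
* BAW (txa_start); 'cindex' wraps that offset into the circular
* tx_buf[] slot array. The mask arithmetic relies on
* ATH_TID_MAX_BUFS being a power of two.
*/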
index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2698
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2699
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2700
"%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2701
"baw head=%d tail=%d\n",
2702
__func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2703
tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2704
tid->baw_tail);
2705
2706
#if 0
2707
assert(tid->tx_buf[cindex] == NULL);
2708
#endif
2709
if (tid->tx_buf[cindex] != NULL) {
2710
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2711
"%s: ba packet dup (index=%d, cindex=%d, "
2712
"head=%d, tail=%d)\n",
2713
__func__, index, cindex, tid->baw_head, tid->baw_tail);
2714
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2715
"%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2716
__func__,
2717
tid->tx_buf[cindex],
2718
SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2719
bf,
2720
SEQNO(bf->bf_state.bfs_seqno)
2721
);
2722
}
2723
tid->tx_buf[cindex] = bf;
2724
2725
if (index >= ((tid->baw_tail - tid->baw_head) &
2726
(ATH_TID_MAX_BUFS - 1))) {
2727
tid->baw_tail = cindex;
2728
INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2729
}
2730
}
2731
2732
/*
2733
* Flip the BAW buffer entry over from the existing one to the new one.
2734
*
2735
* When software retransmitting a (sub-)frame, it is entirely possible that
2736
* the frame ath_buf is marked as BUSY and can't be immediately reused.
2737
* In that instance the buffer is cloned and the new buffer is used for
2738
* retransmit. We thus need to update the ath_buf slot in the BAW buf
2739
* tracking array to maintain consistency.
2740
*/
2741
static void
2742
ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2743
struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2744
{
2745
int index, cindex;
2746
struct ieee80211_tx_ampdu *tap;
2747
int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2748
2749
ATH_TX_LOCK_ASSERT(sc);
2750
2751
tap = ath_tx_get_tx_tid(an, tid->tid);
2752
index = ATH_BA_INDEX(tap->txa_start, seqno);
2753
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2754
2755
/*
2756
* Just warn for now; if it happens then we should find out
2757
* about it. It's highly likely the aggregation session will
2758
* soon hang.
2759
*/
2760
if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2761
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2762
"%s: retransmitted buffer"
2763
" has mismatching seqno's, BA session may hang.\n",
2764
__func__);
2765
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2766
"%s: old seqno=%d, new_seqno=%d\n", __func__,
2767
old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2768
}
2769
2770
if (tid->tx_buf[cindex] != old_bf) {
2771
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2772
"%s: ath_buf pointer incorrect; "
2773
" has m BA session may hang.\n", __func__);
2774
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2775
"%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2776
}
2777
2778
tid->tx_buf[cindex] = new_bf;
2779
}
2780
2781
/*
2782
* seq_start - left edge of BAW
2783
* seq_next - current/next sequence number to allocate
2784
*
2785
* Since the BAW status may be modified by both the ath task and
2786
* the net80211/ifnet contexts, the TID must be locked.
2787
*/
2788
static void
2789
ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2790
struct ath_tid *tid, const struct ath_buf *bf)
2791
{
2792
int index, cindex;
2793
struct ieee80211_tx_ampdu *tap;
2794
int seqno = SEQNO(bf->bf_state.bfs_seqno);
2795
2796
ATH_TX_LOCK_ASSERT(sc);
2797
2798
tap = ath_tx_get_tx_tid(an, tid->tid);
2799
index = ATH_BA_INDEX(tap->txa_start, seqno);
2800
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2801
2802
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2803
"%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2804
"baw head=%d, tail=%d\n",
2805
__func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2806
cindex, tid->baw_head, tid->baw_tail);
2807
2808
/*
2809
* If this occurs then we have a big problem - something else
2810
* has slid tap->txa_start along without updating the BAW
2811
* tracking start/end pointers. Thus the TX BAW state is now
2812
* completely busted.
2813
*
2814
* But for now, since I haven't yet fixed TDMA and buffer cloning,
2815
* it's quite possible that a cloned buffer is making its way
2816
* here and causing it to fire off. Disable TDMA for now.
2817
*/
2818
if (tid->tx_buf[cindex] != bf) {
2819
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2820
"%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2821
__func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2822
tid->tx_buf[cindex],
2823
(tid->tx_buf[cindex] != NULL) ?
2824
SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2825
}
2826
2827
tid->tx_buf[cindex] = NULL;
2828
2829
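/*
* Having cleared this slot, slide the BAW left edge (txa_start)
* and baw_head forward past any leading slots that have already
* completed, opening up room for new sequence numbers.
*/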
while (tid->baw_head != tid->baw_tail &&
2830
!tid->tx_buf[tid->baw_head]) {
2831
INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2832
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2833
}
2834
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2835
"%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2836
__func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2837
}
2838
2839
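/*
* If we're currently leaking frames to a power-save station,
* update this frame's MORE_DATA bit to reflect whether anything
* else is still pending (in the net80211 PS queue or our software
* queue), re-sync the DMA map and consume one leak credit.
*/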
static void
2840
ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2841
struct ath_buf *bf)
2842
{
2843
struct ieee80211_frame *wh;
2844
2845
ATH_TX_LOCK_ASSERT(sc);
2846
2847
if (tid->an->an_leak_count > 0) {
2848
wh = mtod(bf->bf_m, struct ieee80211_frame *);
2849
2850
/*
2851
* Update MORE based on the software/net80211 queue states.
2852
*/
2853
if ((tid->an->an_stack_psq > 0)
2854
|| (tid->an->an_swq_depth > 0))
2855
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2856
else
2857
wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2858
2859
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2860
"%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2861
__func__,
2862
tid->an->an_node.ni_macaddr,
2863
":",
2864
tid->an->an_leak_count,
2865
tid->an->an_stack_psq,
2866
tid->an->an_swq_depth,
2867
!! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2868
2869
/*
2870
* Re-sync the underlying buffer.
2871
*/
2872
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2873
BUS_DMASYNC_PREWRITE);
2874
2875
tid->an->an_leak_count --;
2876
}
2877
}
2878
2879
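/*
* Return whether this TID is currently allowed to transmit or be
* scheduled: a paused TID is blocked unless we're deliberately
* leaking frames to it (eg for PS-POLL servicing), in which case
* the leak count overrides the pause.
*/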
static int
2880
ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2881
{
2882
2883
ATH_TX_LOCK_ASSERT(sc);
2884
2885
if (tid->an->an_leak_count > 0) {
2886
return (1);
2887
}
2888
if (tid->paused)
2889
return (0);
2890
return (1);
2891
}
2892
2893
/*
2894
* Mark the current node/TID as ready to TX.
2895
*
2896
* This is done to make it easy for the software scheduler to
2897
* find which nodes have data to send.
2898
*
2899
* The TXQ lock must be held.
2900
*/
2901
void
2902
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2903
{
2904
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2905
2906
ATH_TX_LOCK_ASSERT(sc);
2907
2908
/*
2909
* If we are leaking out a frame to this destination
2910
* for PS-POLL, ensure that we allow scheduling to
2911
* occur.
2912
*/
2913
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2914
return; /* paused, can't schedule yet */
2915
2916
if (tid->sched)
2917
return; /* already scheduled */
2918
2919
tid->sched = 1;
2920
2921
#if 0
2922
/*
* If this is a sleeping node we're leaking to, give
* it a higher priority. This is so bad for QoS it hurts.
*/
2926
if (tid->an->an_leak_count) {
2927
TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2928
} else {
2929
TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2930
}
2931
#endif
2932
2933
/*
2934
* We can't do the above - it'll confuse the TXQ software
2935
* scheduler which will keep checking the _head_ TID
2936
* in the list to see if it has traffic. If we queue
2937
* a TID to the head of the list and it doesn't transmit,
2938
* we'll check it again.
2939
*
2940
* So, get the rest of this leaking frames support working
2941
* and reliable first and _then_ optimise it so they're
2942
* pushed out in front of any other pending software
2943
* queued nodes.
2944
*/
2945
TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2946
}
2947
2948
/*
2949
* Mark the current node as no longer needing to be polled for
2950
* TX packets.
2951
*
2952
* The TXQ lock must be held.
2953
*/
2954
static void
2955
ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2956
{
2957
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2958
2959
ATH_TX_LOCK_ASSERT(sc);
2960
2961
if (tid->sched == 0)
2962
return;
2963
2964
tid->sched = 0;
2965
TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2966
}
2967
2968
/*
2969
* Assign a sequence number manually to the given frame.
2970
*
2971
* This should only be called for A-MPDU TX frames.
2972
*
2973
* Note: for group addressed frames, the sequence number
2974
* should be from NONQOS_TID, and net80211 should have
2975
* already assigned it for us.
2976
*/
2977
static ieee80211_seq
2978
ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2979
struct ath_buf *bf, struct mbuf *m0)
2980
{
2981
struct ieee80211_frame *wh;
2982
int tid;
2983
ieee80211_seq seqno;
2984
uint8_t subtype;
2985
2986
wh = mtod(m0, struct ieee80211_frame *);
2987
tid = ieee80211_gettid(wh);
2988
2989
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n",
2990
__func__, tid, IEEE80211_QOS_HAS_SEQ(wh));
2991
2992
/* XXX Is it a control frame? Ignore */
2993
2994
/* Does the packet require a sequence number? */
2995
if (! IEEE80211_QOS_HAS_SEQ(wh))
2996
return -1;
2997
2998
ATH_TX_LOCK_ASSERT(sc);
2999
3000
/* TODO: can this use ieee80211_output_seqno_assign() now? */
3001
3002
/*
3003
* Is it a QOS NULL Data frame? Give it a sequence number from
3004
* the default TID (IEEE80211_NONQOS_TID.)
3005
*
3006
* The RX path of everything I've looked at doesn't include the NULL
3007
* data frame sequence number in the aggregation state updates, so
3008
* assigning it a sequence number there will cause a BAW hole on the
3009
* RX side.
3010
*/
3011
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3012
if (IEEE80211_IS_QOS_NULL(wh)) {
3013
/* XXX no locking for this TID? This is a bit of a problem. */
3014
seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3015
INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3016
} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3017
/*
3018
* group addressed frames get a sequence number from
3019
* a different sequence number space.
3020
*/
3021
seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3022
INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3023
} else {
3024
/* Manually assign sequence number */
3025
seqno = ni->ni_txseqs[tid];
3026
INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
3027
}
3028
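/*
* Write the chosen sequence number into the (little-endian)
* sequence control field and stash it in the mbuf so later
* stages (eg the BAW code) can retrieve it via M_SEQNO_GET().
*/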
*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
3029
M_SEQNO_SET(m0, seqno);
3030
3031
/* Return so caller can do something with it if needed */
3032
DPRINTF(sc, ATH_DEBUG_SW_TX,
3033
"%s: -> subtype=0x%x, tid=%d, seqno=%d\n",
3034
__func__, subtype, tid, seqno);
3035
return seqno;
3036
}
3037
3038
/*
3039
* Attempt to direct dispatch an aggregate frame to hardware.
3040
* If the frame is out of BAW, queue.
3041
* Otherwise, schedule it as a single frame.
3042
*/
3043
static void
3044
ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
3045
struct ath_txq *txq, struct ath_buf *bf)
3046
{
3047
struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
3048
struct ieee80211_tx_ampdu *tap;
3049
3050
ATH_TX_LOCK_ASSERT(sc);
3051
3052
tap = ath_tx_get_tx_tid(an, tid->tid);
3053
3054
/* paused? queue */
3055
if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
3056
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3057
/* XXX don't sched - we're paused! */
3058
return;
3059
}
3060
3061
/* outside baw? queue */
3062
if (bf->bf_state.bfs_dobaw &&
3063
(! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3064
SEQNO(bf->bf_state.bfs_seqno)))) {
3065
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3066
ath_tx_tid_sched(sc, tid);
3067
return;
3068
}
3069
3070
/*
3071
* This is a temporary check and should be removed once
3072
* all the relevant code paths have been fixed.
3073
*
3074
* During aggregate retries, it's possible that the head
3075
* frame will fail (which has the bfs_aggr and bfs_nframes
3076
* fields set for said aggregate) and will be retried as
3077
* a single frame. In this instance, the values should
3078
* be reset or the completion code will get upset with you.
3079
*/
3080
if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3081
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3082
"%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3083
bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3084
bf->bf_state.bfs_aggr = 0;
3085
bf->bf_state.bfs_nframes = 1;
3086
}
3087
3088
/* Update CLRDMASK just before this frame is queued */
3089
ath_tx_update_clrdmask(sc, tid, bf);
3090
3091
/* Direct dispatch to hardware */
3092
ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3093
false);
3094
ath_tx_calc_duration(sc, bf);
3095
ath_tx_calc_protection(sc, bf);
3096
ath_tx_set_rtscts(sc, bf);
3097
ath_tx_rate_fill_rcflags(sc, bf);
3098
ath_tx_setds(sc, bf);
3099
3100
/* Statistics */
3101
sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3102
3103
/* Track per-TID hardware queue depth correctly */
3104
tid->hwq_depth++;
3105
3106
/* Add to BAW */
3107
if (bf->bf_state.bfs_dobaw) {
3108
ath_tx_addto_baw(sc, an, tid, bf);
3109
bf->bf_state.bfs_addedbaw = 1;
3110
}
3111
3112
/* Set completion handler, multi-frame aggregate or not */
3113
bf->bf_comp = ath_tx_aggr_comp;
3114
3115
/*
3116
* Update the current leak count if
3117
* we're leaking frames; and set the
3118
* MORE flag as appropriate.
3119
*/
3120
ath_tx_leak_count_update(sc, tid, bf);
3121
3122
/* Hand off to hardware */
3123
ath_tx_handoff(sc, txq, bf);
3124
}
3125
3126
/*
3127
* Attempt to send the packet.
3128
* If the queue isn't busy, direct-dispatch.
3129
* If the queue is busy enough, queue the given packet on the
3130
* relevant software queue.
3131
*/
3132
void
3133
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3134
struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3135
{
3136
struct ath_node *an = ATH_NODE(ni);
3137
struct ieee80211_frame *wh;
3138
struct ath_tid *atid;
3139
int pri, tid;
3140
struct mbuf *m0 = bf->bf_m;
3141
3142
ATH_TX_LOCK_ASSERT(sc);
3143
3144
/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3145
wh = mtod(m0, struct ieee80211_frame *);
3146
pri = ath_tx_getac(sc, m0);
3147
tid = ath_tx_gettid(sc, m0);
3148
atid = &an->an_tid[tid];
3149
3150
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3151
__func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3152
3153
/* Set local packet state, used to queue packets to hardware */
3154
/* XXX potentially duplicate info, re-check */
3155
bf->bf_state.bfs_tid = tid;
3156
bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3157
bf->bf_state.bfs_pri = pri;
3158
3159
/*
* If the hardware queue isn't busy, dispatch the frame directly.
* If the hardware queue is busy, software queue it.
* If the TID is paused or the traffic is outside the BAW, software
* queue it.
*
* If the node is in power-save and we're leaking a frame,
* leak a single frame.
*/
3168
if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3169
/* TID is paused, queue */
3170
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3171
/*
3172
* If the caller requested that it be sent at a high
3173
* priority, queue it at the head of the list.
3174
*/
3175
if (queue_to_head)
3176
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3177
else
3178
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3179
} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3180
/* AMPDU pending; queue */
3181
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3182
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3183
/* XXX sched? */
3184
} else if (ath_tx_ampdu_running(sc, an, tid)) {
3185
/*
* AMPDU running, queue a single frame if the hardware queue
* isn't busy.
*
* If the hardware queue is busy sending an aggregate frame,
* then just hold off so we can queue more aggregate frames.
*
* Otherwise we may end up with single frames leaking through
* because we are dispatching them too quickly.
*
* TODO: maybe we should treat this as two policies - minimise
* latency, or maximise throughput. Then for BE/BK we can
* maximise throughput, and VO/VI (if AMPDU is enabled!)
* minimise latency.
*/
3200
3201
/*
3202
* Always queue the frame to the tail of the list.
3203
*/
3204
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3205
3206
/*
* If the hardware queue isn't busy, direct dispatch
* the head frame in the list.
*
* Note: if we're, say, configured to do ADDBA but not A-MPDU
* then maybe we want to still queue two non-aggregate frames
* to the hardware. (Again with the per-TID policy
* configuration.)
*
* Otherwise, schedule the TID.
*/
3217
/* XXX TXQ locking */
3218
if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3219
bf = ATH_TID_FIRST(atid);
3220
ATH_TID_REMOVE(atid, bf, bf_list);
3221
3222
/*
3223
* Ensure it's definitely treated as a non-AMPDU
3224
* frame - this information may have been left
3225
* over from a previous attempt.
3226
*/
3227
bf->bf_state.bfs_aggr = 0;
3228
bf->bf_state.bfs_nframes = 1;
3229
3230
/* Queue to the hardware */
3231
ath_tx_xmit_aggr(sc, an, txq, bf);
3232
DPRINTF(sc, ATH_DEBUG_SW_TX,
3233
"%s: xmit_aggr\n",
3234
__func__);
3235
} else {
3236
DPRINTF(sc, ATH_DEBUG_SW_TX,
3237
"%s: ampdu; swq'ing\n",
3238
__func__);
3239
3240
ath_tx_tid_sched(sc, atid);
3241
}
3242
/*
3243
* If we're not doing A-MPDU, be prepared to direct dispatch
3244
* up to both limits if possible. This particular corner
3245
* case may end up with packet starvation between aggregate
3246
* traffic and non-aggregate traffic: we want to ensure
3247
* that non-aggregate stations get a few frames queued to the
3248
* hardware before the aggregate station(s) get their chance.
3249
*
3250
* So if you only ever see a couple of frames direct dispatched
3251
* to the hardware from a non-AMPDU client, check both here
3252
* and in the software queue dispatcher to ensure that those
3253
* non-AMPDU stations get a fair chance to transmit.
3254
*/
3255
/* XXX TXQ locking */
3256
} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3257
(txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3258
/* AMPDU not running, attempt direct dispatch */
3259
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3260
/* See if clrdmask needs to be set */
3261
ath_tx_update_clrdmask(sc, atid, bf);
3262
3263
/*
3264
* Update the current leak count if
3265
* we're leaking frames; and set the
3266
* MORE flag as appropriate.
3267
*/
3268
ath_tx_leak_count_update(sc, atid, bf);
3269
3270
/*
3271
* Dispatch the frame.
3272
*/
3273
ath_tx_xmit_normal(sc, txq, bf);
3274
} else {
3275
/* Busy; queue */
3276
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3277
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3278
ath_tx_tid_sched(sc, atid);
3279
}
3280
}
3281
3282
/*
3283
* Only set the clrdmask bit if none of the nodes are currently
3284
* filtered.
3285
*
3286
* XXX TODO: go through all the callers and check to see
3287
* which are being called in the context of looping over all
3288
* TIDs (eg, if all tids are being paused, resumed, etc.)
3289
* That'll avoid O(n^2) complexity here.
3290
*/
3291
static void
3292
ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3293
{
3294
int i;
3295
3296
ATH_TX_LOCK_ASSERT(sc);
3297
3298
for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3299
if (an->an_tid[i].isfiltered == 1)
3300
return;
3301
}
3302
an->clrdmask = 1;
3303
}
3304
3305
/*
3306
* Configure the per-TID node state.
3307
*
3308
* This likely belongs in if_ath_node.c but I can't think of anywhere
3309
* else to put it just yet.
3310
*
3311
* This sets up the SLISTs and the mutex as appropriate.
3312
*/
3313
void
3314
ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3315
{
3316
int i, j;
3317
struct ath_tid *atid;
3318
3319
for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3320
atid = &an->an_tid[i];
3321
3322
/* XXX now with this bzero(), is the field 0'ing needed? */
3323
bzero(atid, sizeof(*atid));
3324
3325
TAILQ_INIT(&atid->tid_q);
3326
TAILQ_INIT(&atid->filtq.tid_q);
3327
atid->tid = i;
3328
atid->an = an;
3329
for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3330
atid->tx_buf[j] = NULL;
3331
atid->baw_head = atid->baw_tail = 0;
3332
atid->paused = 0;
3333
atid->sched = 0;
3334
atid->hwq_depth = 0;
3335
atid->cleanup_inprogress = 0;
3336
if (i == IEEE80211_NONQOS_TID)
3337
atid->ac = ATH_NONQOS_TID_AC;
3338
else
3339
atid->ac = TID_TO_WME_AC(i);
3340
}
3341
an->clrdmask = 1; /* Always start by setting this bit */
3342
}
3343
3344
/*
3345
* Pause the current TID. This stops packets from being transmitted
3346
* on it.
3347
*
3348
* Since this is also called from upper layers as well as the driver,
3349
* it will get the TID lock.
3350
*/
3351
static void
3352
ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3353
{
3354
3355
ATH_TX_LOCK_ASSERT(sc);
3356
tid->paused++;
3357
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3358
__func__,
3359
tid->an->an_node.ni_macaddr, ":",
3360
tid->tid,
3361
tid->paused);
3362
}
3363
3364
/*
3365
* Unpause the current TID, and schedule it if needed.
3366
*/
3367
static void
3368
ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3369
{
3370
ATH_TX_LOCK_ASSERT(sc);
3371
3372
/*
3373
* There are some odd places where ath_tx_tid_resume() is called
3374
* when it shouldn't be; this works around that particular issue
3375
* until it's actually resolved.
3376
*/
3377
if (tid->paused == 0) {
3378
device_printf(sc->sc_dev,
3379
"%s: [%6D]: tid=%d, paused=0?\n",
3380
__func__,
3381
tid->an->an_node.ni_macaddr, ":",
3382
tid->tid);
3383
} else {
3384
tid->paused--;
3385
}
3386
3387
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3388
"%s: [%6D]: tid=%d, unpaused = %d\n",
3389
__func__,
3390
tid->an->an_node.ni_macaddr, ":",
3391
tid->tid,
3392
tid->paused);
3393
3394
if (tid->paused)
3395
return;
3396
3397
/*
3398
* Override the clrdmask configuration for the next frame
3399
* from this TID, just to get the ball rolling.
3400
*/
3401
ath_tx_set_clrdmask(sc, tid->an);
3402
3403
if (tid->axq_depth == 0)
3404
return;
3405
3406
/* XXX isfiltered shouldn't ever be 0 at this point */
3407
if (tid->isfiltered == 1) {
3408
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3409
__func__);
3410
return;
3411
}
3412
3413
ath_tx_tid_sched(sc, tid);
3414
3415
/*
3416
* Queue the software TX scheduler.
3417
*/
3418
ath_tx_swq_kick(sc);
3419
}
3420
3421
/*
3422
* Add the given ath_buf to the TID filtered frame list.
3423
* This requires the TID be filtered.
3424
*/
3425
static void
3426
ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3427
struct ath_buf *bf)
3428
{
3429
3430
ATH_TX_LOCK_ASSERT(sc);
3431
3432
if (!tid->isfiltered)
3433
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3434
__func__);
3435
3436
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3437
3438
/* Set the retry bit and bump the retry counter */
3439
ath_tx_set_retry(sc, bf);
3440
sc->sc_stats.ast_tx_swfiltered++;
3441
3442
ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3443
}
3444
3445
/*
3446
* Handle a completed filtered frame from the given TID.
3447
* This just enables/pauses the filtered frame state if required
3448
* and appends the filtered frame to the filtered queue.
3449
*/
3450
static void
3451
ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3452
struct ath_buf *bf)
3453
{
3454
3455
ATH_TX_LOCK_ASSERT(sc);
3456
3457
if (! tid->isfiltered) {
3458
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3459
__func__, tid->tid);
3460
tid->isfiltered = 1;
3461
ath_tx_tid_pause(sc, tid);
3462
}
3463
3464
/* Add the frame to the filter queue */
3465
ath_tx_tid_filt_addbuf(sc, tid, bf);
3466
}
3467
3468
/*
3469
* Complete the filtered frame TX completion.
3470
*
3471
* If there are no more frames in the hardware queue, unpause/unfilter
3472
* the TID if applicable. Otherwise we will wait for a node PS transition
3473
* to unfilter.
3474
*/
3475
static void
3476
ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3477
{
3478
struct ath_buf *bf;
3479
int do_resume = 0;
3480
3481
ATH_TX_LOCK_ASSERT(sc);
3482
3483
if (tid->hwq_depth != 0)
3484
return;
3485
3486
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3487
__func__, tid->tid);
3488
if (tid->isfiltered == 1) {
3489
tid->isfiltered = 0;
3490
do_resume = 1;
3491
}
3492
3493
/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3494
ath_tx_set_clrdmask(sc, tid->an);
3495
3496
/* XXX this is really quite inefficient */
3497
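/*
* Pull frames off the tail of the filtered list and push them
* onto the head of the software queue; doing it tail-first
* preserves the original frame ordering.
*/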
while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3498
ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3499
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3500
}
3501
3502
/* And only resume if we had paused before */
3503
if (do_resume)
3504
ath_tx_tid_resume(sc, tid);
3505
}
3506
3507
/*
3508
* Called when a single (aggregate or otherwise) frame is completed.
3509
*
3510
* Returns 0 if the buffer could be added to the filtered list
3511
* (cloned or otherwise), 1 if the buffer couldn't be added to the
3512
* filtered list (failed clone; expired retry) and the caller should
3513
* free it and handle it like a failure (eg by sending a BAR.)
3514
*
3515
* Since the buffer may be cloned, bf must not be touched after this
* if the return value is 0.
3517
*/
3518
static int
3519
ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3520
struct ath_buf *bf)
3521
{
3522
struct ath_buf *nbf;
3523
int retval;
3524
3525
ATH_TX_LOCK_ASSERT(sc);
3526
3527
/*
3528
* Don't allow a filtered frame to live forever.
3529
*/
3530
if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3531
sc->sc_stats.ast_tx_swretrymax++;
3532
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3533
"%s: bf=%p, seqno=%d, exceeded retries\n",
3534
__func__,
3535
bf,
3536
SEQNO(bf->bf_state.bfs_seqno));
3537
retval = 1; /* error */
3538
goto finish;
3539
}
3540
3541
/*
3542
* A busy buffer can't be added to the retry list.
3543
* It needs to be cloned.
3544
*/
3545
if (bf->bf_flags & ATH_BUF_BUSY) {
3546
nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3547
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3548
"%s: busy buffer clone: %p -> %p\n",
3549
__func__, bf, nbf);
3550
} else {
3551
nbf = bf;
3552
}
3553
3554
if (nbf == NULL) {
3555
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3556
"%s: busy buffer couldn't be cloned (%p)!\n",
3557
__func__, bf);
3558
retval = 1; /* error */
3559
} else {
3560
ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3561
retval = 0; /* ok */
3562
}
3563
finish:
3564
ath_tx_tid_filt_comp_complete(sc, tid);
3565
3566
return (retval);
3567
}
3568
3569
static void
ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf_first, ath_bufhead *bf_q)
{
struct ath_buf *bf, *bf_next, *nbf;

ATH_TX_LOCK_ASSERT(sc);

bf = bf_first;
while (bf) {
bf_next = bf->bf_next;
bf->bf_next = NULL; /* Remove it from the aggr list */

/*
* Don't allow a filtered frame to live forever.
*/
if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
sc->sc_stats.ast_tx_swretrymax++;
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
__func__,
tid->tid,
bf,
SEQNO(bf->bf_state.bfs_seqno));
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
goto next;
}

if (bf->bf_flags & ATH_BUF_BUSY) {
nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
__func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
} else {
nbf = bf;
}

/*
* If the buffer couldn't be cloned, add it to bf_q;
* the caller will free the buffer(s) as required.
*/
if (nbf == NULL) {
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
__func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
} else {
ath_tx_tid_filt_comp_buf(sc, tid, nbf);
}
next:
bf = bf_next;
}

ath_tx_tid_filt_comp_complete(sc, tid);
}

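/*
 * Note: unlike the single-frame variant above, ath_tx_tid_filt_comp_aggr()
 * doesn't return a status; subframes that exceeded the retry limit or
 * that couldn't be cloned are handed back on bf_q and the caller
 * (ath_tx_aggr_comp_aggr()) is expected to pull them out of the BAW,
 * suspend the TID for a BAR if anything was dropped, and complete them.
 */
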
/*
* Suspend the queue because we need to TX a BAR.
*/
static void
ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
{

ATH_TX_LOCK_ASSERT(sc);

DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
__func__,
tid->tid,
tid->bar_wait,
tid->bar_tx);

/* We shouldn't be called when bar_tx is 1 */
if (tid->bar_tx) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: bar_tx is 1?!\n", __func__);
}

/* If we've already been called, just be patient. */
if (tid->bar_wait)
return;

/* Wait! */
tid->bar_wait = 1;

/* Only one pause, no matter how many frames fail */
ath_tx_tid_pause(sc, tid);
}

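/*
 * Note: BAR handling is a small state machine spread over the
 * ath_tx_tid_bar_*() routines here:
 *
 *	bar_suspend:	bar_wait = 1, pause the TID (once);
 *	bar_tx_ready:	true once bar_wait is set and hwq_depth is 0;
 *	bar_tx:		bar_tx = 1, send the BAR at tap->txa_start;
 *	bar_unsuspend:	bar_tx = bar_wait = 0, resume the TID.
 *
 * Only one pause/resume pair is consumed no matter how many frames
 * failed while waiting for the BAR to go out.
 */
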
/*
* We've finished with BAR handling - either we succeeded or
* failed. Either way, unsuspend TX.
*/
static void
ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
{

ATH_TX_LOCK_ASSERT(sc);

DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, called\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid);

if (tid->bar_tx == 0 || tid->bar_wait == 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
__func__, tid->an->an_node.ni_macaddr, ":",
tid->tid, tid->bar_tx, tid->bar_wait);
}

tid->bar_tx = tid->bar_wait = 0;
ath_tx_tid_resume(sc, tid);
}

/*
* Return whether we're ready to TX a BAR frame.
*
* Requires the TID lock be held.
*/
static int
ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
{

ATH_TX_LOCK_ASSERT(sc);

if (tid->bar_wait == 0 || tid->hwq_depth > 0)
return (0);

DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, bar ready\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid);

return (1);
}

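/*
 * Note: requiring hwq_depth == 0 before a BAR goes out means every
 * hardware-queued frame for this TID has completed (or failed) by the
 * time the new BAW left edge is sampled in ath_tx_tid_bar_tx(), so
 * tap->txa_start should be stable when it's handed to
 * ieee80211_send_bar().
 */
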
/*
* Check whether the current TID is ready to have a BAR
* TXed and if so, do the TX.
*
* Since the TID/TXQ lock can't be held during a call to
* ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
* sending the BAR and locking it again.
*
* Eventually, the code to send the BAR should be broken out
* from this routine so the lock doesn't have to be reacquired
* just to be immediately dropped by the caller.
*/
static void
ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
{
struct ieee80211_tx_ampdu *tap;

ATH_TX_LOCK_ASSERT(sc);

DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, called\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid);

tap = ath_tx_get_tx_tid(tid->an, tid->tid);

/*
* This is an error condition!
*/
if (tid->bar_wait == 0 || tid->bar_tx == 1) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
__func__, tid->an->an_node.ni_macaddr, ":",
tid->tid, tid->bar_tx, tid->bar_wait);
return;
}

/* Don't do anything if we still have pending frames */
if (tid->hwq_depth > 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid,
tid->hwq_depth);
return;
}

/* We're now about to TX */
tid->bar_tx = 1;

/*
* Override the clrdmask configuration for the next frame,
* just to get the ball rolling.
*/
ath_tx_set_clrdmask(sc, tid->an);

/*
* Calculate new BAW left edge, now that all frames have either
* succeeded or failed.
*
* XXX verify this is _actually_ the valid value to begin at!
*/
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, new BAW left edge=%d\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid,
tap->txa_start);

/* Try sending the BAR frame */
/* We can't hold the lock here! */

ATH_TX_UNLOCK(sc);
if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
/* Success? Now we wait for notification that it's done */
ATH_TX_LOCK(sc);
return;
}

/* Failure? For now, warn loudly and continue */
ATH_TX_LOCK(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, failed to TX BAR, continue!\n",
__func__, tid->an->an_node.ni_macaddr, ":",
tid->tid);
ath_tx_tid_bar_unsuspend(sc, tid);
}

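/*
 * Note: ath_tx_tid_bar_tx() sets bar_tx = 1 before dropping the TX
 * lock around ieee80211_send_bar(), so anything that re-enters it
 * while the lock is released will see bar_tx set and bail rather than
 * start a second BAR.  The TID stays paused until the failure path
 * above, or the BAR response handling elsewhere in this file, calls
 * ath_tx_tid_bar_unsuspend().
 */
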
static void
ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
{

ATH_TX_LOCK_ASSERT(sc);

/*
* If the current TID is running AMPDU, update
* the BAW.
*/
if (ath_tx_ampdu_running(sc, an, tid->tid) &&
bf->bf_state.bfs_dobaw) {
/*
* Only remove the frame from the BAW if it's
* been transmitted at least once; this means
* the frame was in the BAW to begin with.
*/
if (bf->bf_state.bfs_retries > 0) {
ath_tx_update_baw(sc, an, tid, bf);
bf->bf_state.bfs_dobaw = 0;
}
#if 0
/*
* This has become a non-fatal error now
*/
if (! bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
#endif
}

/* Strip it out of an aggregate list if it was in one */
bf->bf_next = NULL;

/* Insert on the free queue to be freed by the caller */
TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
}

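/*
 * Note: a sequence number is assigned when a frame is queued, but the
 * frame is only added to the BAW when it's first handed to the
 * hardware.  That's why ath_tx_tid_drain_pkt() only slides the BAW
 * for frames with bfs_retries > 0 - frames that were never
 * transmitted were never in the BAW to begin with.
 */
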
static void
3844
ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3845
const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3846
{
3847
struct ieee80211_node *ni = &an->an_node;
3848
struct ath_txq *txq;
3849
struct ieee80211_tx_ampdu *tap;
3850
3851
txq = sc->sc_ac2q[tid->ac];
3852
tap = ath_tx_get_tx_tid(an, tid->tid);
3853
3854
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3855
"%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3856
"seqno=%d, retry=%d\n",
3857
__func__,
3858
pfx,
3859
ni->ni_macaddr,
3860
":",
3861
bf,
3862
bf->bf_state.bfs_addedbaw,
3863
bf->bf_state.bfs_dobaw,
3864
SEQNO(bf->bf_state.bfs_seqno),
3865
bf->bf_state.bfs_retries);
3866
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3867
"%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3868
__func__,
3869
pfx,
3870
ni->ni_macaddr,
3871
":",
3872
bf,
3873
txq->axq_qnum,
3874
txq->axq_depth,
3875
txq->axq_aggr_depth);
3876
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3877
"%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3878
"isfiltered=%d\n",
3879
__func__,
3880
pfx,
3881
ni->ni_macaddr,
3882
":",
3883
bf,
3884
tid->axq_depth,
3885
tid->hwq_depth,
3886
tid->bar_wait,
3887
tid->isfiltered);
3888
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3889
"%s: %s: %6D: tid %d: "
3890
"sched=%d, paused=%d, "
3891
"incomp=%d, baw_head=%d, "
3892
"baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3893
__func__,
3894
pfx,
3895
ni->ni_macaddr,
3896
":",
3897
tid->tid,
3898
tid->sched, tid->paused,
3899
tid->incomp, tid->baw_head,
3900
tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3901
ni->ni_txseqs[tid->tid]);
3902
3903
/* XXX Dump the frame, see what it is? */
3904
if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3905
ieee80211_dump_pkt(ni->ni_ic,
3906
mtod(bf->bf_m, const uint8_t *),
3907
bf->bf_m->m_len, 0, -1);
3908
}
3909
3910
/*
3911
* Free any packets currently pending in the software TX queue.
3912
*
3913
* This will be called when a node is being deleted.
3914
*
3915
* It can also be called on an active node during an interface
3916
* reset or state transition.
3917
*
3918
* (From Linux/reference):
3919
*
3920
* TODO: For frame(s) that are in the retry state, we will reuse the
3921
* sequence number(s) without setting the retry bit. The
3922
* alternative is to give up on these and BAR the receiver's window
3923
* forward.
3924
*/
3925
static void
3926
ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3927
struct ath_tid *tid, ath_bufhead *bf_cq)
3928
{
3929
struct ath_buf *bf;
3930
struct ieee80211_tx_ampdu *tap;
3931
struct ieee80211_node *ni = &an->an_node;
3932
int t;
3933
3934
tap = ath_tx_get_tx_tid(an, tid->tid);
3935
3936
ATH_TX_LOCK_ASSERT(sc);
3937
3938
/* Walk the queue, free frames */
3939
t = 0;
3940
for (;;) {
3941
bf = ATH_TID_FIRST(tid);
3942
if (bf == NULL) {
3943
break;
3944
}
3945
3946
if (t == 0) {
3947
ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3948
// t = 1;
3949
}
3950
3951
ATH_TID_REMOVE(tid, bf, bf_list);
3952
ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3953
}
3954
3955
/* And now, drain the filtered frame queue */
3956
t = 0;
3957
for (;;) {
3958
bf = ATH_TID_FILT_FIRST(tid);
3959
if (bf == NULL)
3960
break;
3961
3962
if (t == 0) {
3963
ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3964
// t = 1;
3965
}
3966
3967
ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3968
ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3969
}
3970
3971
/*
3972
* Override the clrdmask configuration for the next frame
3973
* in case there is some future transmission, just to get
3974
* the ball rolling.
3975
*
3976
* This won't hurt things if the TID is about to be freed.
3977
*/
3978
ath_tx_set_clrdmask(sc, tid->an);
3979
3980
/*
3981
* Now that it's completed, grab the TID lock and update
3982
* the sequence number and BAW window.
3983
* Because sequence numbers have been assigned to frames
3984
* that haven't been sent yet, it's entirely possible
3985
* we'll be called with some pending frames that have not
3986
* been transmitted.
3987
*
3988
* The cleaner solution is to do the sequence number allocation
3989
* when the packet is first transmitted - and thus the "retries"
3990
* check above would be enough to update the BAW/seqno.
3991
*/
3992
3993
/* But don't do it for non-QoS TIDs */
3994
if (tap) {
3995
#if 1
3996
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3997
"%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3998
__func__,
3999
ni->ni_macaddr,
4000
":",
4001
an,
4002
tid->tid,
4003
tap->txa_start);
4004
#endif
4005
ni->ni_txseqs[tid->tid] = tap->txa_start;
4006
tid->baw_tail = tid->baw_head;
4007
}
4008
}
4009
4010
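/*
 * Note: after a drain, ni_txseqs[] for the TID is wound forward to
 * tap->txa_start so the next sequence number handed out matches the
 * BAW left edge, and baw_tail is pulled up to baw_head for the same
 * reason.  This only applies when there's an ampdu TX state for the
 * TID (tap != NULL), ie not for non-QoS traffic.
 */
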
/*
4011
* Reset the TID state. This must be only called once the node has
4012
* had its frames flushed from this TID, to ensure that no other
4013
* pause / unpause logic can kick in.
4014
*/
4015
static void
4016
ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
4017
{
4018
4019
#if 0
4020
tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
4021
tid->paused = tid->sched = tid->addba_tx_pending = 0;
4022
tid->incomp = tid->cleanup_inprogress = 0;
4023
#endif
4024
4025
/*
4026
* If we have a bar_wait set, we need to unpause the TID
4027
* here. Otherwise once cleanup has finished, the TID won't
4028
* have the right paused counter.
4029
*
4030
* XXX I'm not going through resume here - I don't want the
4031
* node to be rescheduled just yet. This however should be
4032
* methodized!
4033
*/
4034
if (tid->bar_wait) {
4035
if (tid->paused > 0) {
4036
tid->paused --;
4037
}
4038
}
4039
4040
/*
4041
* XXX same with a currently filtered TID.
4042
*
4043
* Since this is being called during a flush, we assume that
4044
* the filtered frame list is actually empty.
4045
*
4046
* XXX TODO: add in a check to ensure that the filtered queue
4047
* depth is actually 0!
4048
*/
4049
if (tid->isfiltered) {
4050
if (tid->paused > 0) {
4051
tid->paused --;
4052
}
4053
}
4054
4055
/*
4056
* Clear BAR, filtered frames, scheduled and ADDBA pending.
4057
* The TID may be going through cleanup from the last association
4058
* where things in the BAW are still in the hardware queue.
4059
*/
4060
tid->bar_wait = 0;
4061
tid->bar_tx = 0;
4062
tid->isfiltered = 0;
4063
tid->sched = 0;
4064
tid->addba_tx_pending = 0;
4065
4066
/*
4067
* XXX TODO: it may just be enough to walk the HWQs and mark
4068
* frames for that node as non-aggregate; or mark the ath_node
4069
* with something that indicates that aggregation is no longer
4070
* occurring. Then we can just toss the BAW complaints and
4071
* do a complete hard reset of state here - no pause, no
4072
* complete counter, etc.
4073
*/
4074
4075
}
4076
4077
/*
4078
* Flush all software queued packets for the given node.
4079
*
4080
* This occurs when a completion handler frees the last buffer
4081
* for a node, and the node is thus freed. This causes the node
4082
* to be cleaned up, which ends up calling ath_tx_node_flush.
4083
*/
4084
void
4085
ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4086
{
4087
int tid;
4088
ath_bufhead bf_cq;
4089
struct ath_buf *bf;
4090
4091
TAILQ_INIT(&bf_cq);
4092
4093
ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4094
&an->an_node);
4095
4096
ATH_TX_LOCK(sc);
4097
DPRINTF(sc, ATH_DEBUG_NODE,
4098
"%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4099
"swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4100
__func__,
4101
an->an_node.ni_macaddr,
4102
":",
4103
an->an_is_powersave,
4104
an->an_stack_psq,
4105
an->an_tim_set,
4106
an->an_swq_depth,
4107
an->clrdmask,
4108
an->an_leak_count);
4109
4110
for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4111
struct ath_tid *atid = &an->an_tid[tid];
4112
4113
/* Free packets */
4114
ath_tx_tid_drain(sc, an, atid, &bf_cq);
4115
4116
/* Remove this tid from the list of active tids */
4117
ath_tx_tid_unsched(sc, atid);
4118
4119
/* Reset the per-TID pause, BAR, etc state */
4120
ath_tx_tid_reset(sc, atid);
4121
}
4122
4123
/*
4124
* Clear global leak count
4125
*/
4126
an->an_leak_count = 0;
4127
ATH_TX_UNLOCK(sc);
4128
4129
/* Handle completed frames */
4130
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4131
TAILQ_REMOVE(&bf_cq, bf, bf_list);
4132
ath_tx_default_comp(sc, bf, 0);
4133
}
4134
}
4135
4136
/*
4137
* Drain all the software TXQs currently with traffic queued.
4138
*/
4139
void
4140
ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4141
{
4142
struct ath_tid *tid;
4143
ath_bufhead bf_cq;
4144
struct ath_buf *bf;
4145
4146
TAILQ_INIT(&bf_cq);
4147
ATH_TX_LOCK(sc);
4148
4149
/*
4150
* Iterate over all active tids for the given txq,
4151
* flushing and unsched'ing them
4152
*/
4153
while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4154
tid = TAILQ_FIRST(&txq->axq_tidq);
4155
ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4156
ath_tx_tid_unsched(sc, tid);
4157
}
4158
4159
ATH_TX_UNLOCK(sc);
4160
4161
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4162
TAILQ_REMOVE(&bf_cq, bf, bf_list);
4163
ath_tx_default_comp(sc, bf, 0);
4164
}
4165
}
4166
4167
/*
4168
* Handle completion of non-aggregate session frames.
4169
*
4170
* This (currently) doesn't implement software retransmission of
4171
* non-aggregate frames!
4172
*
4173
* Software retransmission of non-aggregate frames needs to obey
4174
* the strict sequence number ordering, and drop any frames that
4175
* will fail this.
4176
*
4177
* For now, filtered frames and frame transmission will cause
4178
* all kinds of issues. So we don't support them.
4179
*
4180
* So anyone queuing frames via ath_tx_normal_xmit() or
4181
* ath_tx_hw_queue_norm() must override and set CLRDMASK.
4182
*/
4183
void
4184
ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4185
{
4186
struct ieee80211_node *ni = bf->bf_node;
4187
struct ath_node *an = ATH_NODE(ni);
4188
int tid = bf->bf_state.bfs_tid;
4189
struct ath_tid *atid = &an->an_tid[tid];
4190
struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4191
4192
/* The TID state is protected behind the TXQ lock */
4193
ATH_TX_LOCK(sc);
4194
4195
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4196
__func__, bf, fail, atid->hwq_depth - 1);
4197
4198
atid->hwq_depth--;
4199
4200
#if 0
4201
/*
4202
* If the frame was filtered, stick it on the filter frame
4203
* queue and complain about it. It shouldn't happen!
4204
*/
4205
if ((ts->ts_status & HAL_TXERR_FILT) ||
4206
(ts->ts_status != 0 && atid->isfiltered)) {
4207
DPRINTF(sc, ATH_DEBUG_SW_TX,
4208
"%s: isfiltered=%d, ts_status=%d: huh?\n",
4209
__func__,
4210
atid->isfiltered,
4211
ts->ts_status);
4212
ath_tx_tid_filt_comp_buf(sc, atid, bf);
4213
}
4214
#endif
4215
if (atid->isfiltered)
4216
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4217
if (atid->hwq_depth < 0)
4218
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4219
__func__, atid->hwq_depth);
4220
4221
/* If the TID is being cleaned up, track things */
4222
/* XXX refactor! */
4223
if (atid->cleanup_inprogress) {
4224
atid->incomp--;
4225
if (atid->incomp == 0) {
4226
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4227
"%s: TID %d: cleaned up! resume!\n",
4228
__func__, tid);
4229
atid->cleanup_inprogress = 0;
4230
ath_tx_tid_resume(sc, atid);
4231
}
4232
}
4233
4234
/*
4235
* If the queue is filtered, potentially mark it as complete
4236
* and reschedule it as needed.
4237
*
4238
* This is required as there may be a subsequent TX descriptor
4239
* for this end-node that has CLRDMASK set, so it's quite possible
4240
* that a filtered frame will be followed by a non-filtered
4241
* (complete or otherwise) frame.
4242
*
4243
* XXX should we do this before we complete the frame?
4244
*/
4245
if (atid->isfiltered)
4246
ath_tx_tid_filt_comp_complete(sc, atid);
4247
ATH_TX_UNLOCK(sc);
4248
4249
/*
4250
* punt to rate control if we're not being cleaned up
4251
* during a hw queue drain and the frame wanted an ACK.
4252
*/
4253
if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4254
ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4255
ts,
4256
bf->bf_state.bfs_pktlen,
4257
bf->bf_state.bfs_pktlen,
4258
1, (ts->ts_status == 0) ? 0 : 1);
4259
4260
ath_tx_default_comp(sc, bf, fail);
4261
}
4262
4263
/*
4264
* Handle cleanup of aggregate session packets that aren't
4265
* an A-MPDU.
4266
*
4267
* There's no need to update the BAW here - the session is being
4268
* torn down.
4269
*/
4270
static void
4271
ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4272
{
4273
struct ieee80211_node *ni = bf->bf_node;
4274
struct ath_node *an = ATH_NODE(ni);
4275
int tid = bf->bf_state.bfs_tid;
4276
struct ath_tid *atid = &an->an_tid[tid];
4277
4278
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4279
__func__, tid, atid->incomp);
4280
4281
ATH_TX_LOCK(sc);
4282
atid->incomp--;
4283
4284
/* XXX refactor! */
4285
if (bf->bf_state.bfs_dobaw) {
4286
ath_tx_update_baw(sc, an, atid, bf);
4287
if (!bf->bf_state.bfs_addedbaw)
4288
DPRINTF(sc, ATH_DEBUG_SW_TX,
4289
"%s: wasn't added: seqno %d\n",
4290
__func__, SEQNO(bf->bf_state.bfs_seqno));
4291
}
4292
4293
if (atid->incomp == 0) {
4294
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4295
"%s: TID %d: cleaned up! resume!\n",
4296
__func__, tid);
4297
atid->cleanup_inprogress = 0;
4298
ath_tx_tid_resume(sc, atid);
4299
}
4300
ATH_TX_UNLOCK(sc);
4301
4302
ath_tx_default_comp(sc, bf, 0);
4303
}
4304
4305
/*
4306
* This as it currently stands is a bit dumb. Ideally we'd just
4307
* fail the frame the normal way and have it permanently fail
4308
* via the normal aggregate completion path.
4309
*/
4310
static void
4311
ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4312
int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4313
{
4314
struct ath_tid *atid = &an->an_tid[tid];
4315
struct ath_buf *bf, *bf_next;
4316
4317
ATH_TX_LOCK_ASSERT(sc);
4318
4319
/*
4320
* Remove this frame from the queue.
4321
*/
4322
ATH_TID_REMOVE(atid, bf_head, bf_list);
4323
4324
/*
4325
* Loop over all the frames in the aggregate.
4326
*/
4327
bf = bf_head;
4328
while (bf != NULL) {
4329
bf_next = bf->bf_next; /* next aggregate frame, or NULL */
4330
4331
/*
4332
* If it's been added to the BAW we need to kick
4333
* it out of the BAW before we continue.
4334
*
4335
* XXX if it's an aggregate, assert that it's in the
4336
* BAW - we shouldn't have it be in an aggregate
4337
* otherwise!
4338
*/
4339
if (bf->bf_state.bfs_addedbaw) {
4340
ath_tx_update_baw(sc, an, atid, bf);
4341
bf->bf_state.bfs_dobaw = 0;
4342
}
4343
4344
/*
4345
* Give it the default completion handler.
4346
*/
4347
bf->bf_comp = ath_tx_normal_comp;
4348
bf->bf_next = NULL;
4349
4350
/*
4351
* Add it to the list to free.
4352
*/
4353
TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4354
4355
/*
4356
* Now advance to the next frame in the aggregate.
4357
*/
4358
bf = bf_next;
4359
}
4360
}
4361
4362
/*
4363
* Performs transmit side cleanup when TID changes from aggregated to
4364
* unaggregated and during reassociation.
4365
*
4366
* For now, this just tosses everything from the TID software queue
4367
* whether or not it has been retried and marks the TID as
4368
* pending completion if there's anything for this TID queued to
4369
* the hardware.
4370
*
4371
* The caller is responsible for pausing the TID and unpausing the
4372
* TID if no cleanup was required. Otherwise the cleanup path will
4373
* unpause the TID once the last hardware queued frame is completed.
4374
*/
4375
static void
4376
ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4377
ath_bufhead *bf_cq)
4378
{
4379
struct ath_tid *atid = &an->an_tid[tid];
4380
struct ath_buf *bf, *bf_next;
4381
4382
ATH_TX_LOCK_ASSERT(sc);
4383
4384
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4385
"%s: TID %d: called; inprogress=%d\n", __func__, tid,
4386
atid->cleanup_inprogress);
4387
4388
/*
4389
* Move the filtered frames to the TX queue, before
4390
* we run off and discard/process things.
4391
*/
4392
4393
/* XXX this is really quite inefficient */
4394
while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4395
ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4396
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4397
}
4398
4399
/*
4400
* Update the frames in the software TX queue:
4401
*
4402
* + Discard retry frames in the queue
4403
* + Fix the completion function to be non-aggregate
4404
*/
4405
bf = ATH_TID_FIRST(atid);
4406
while (bf) {
4407
/*
4408
* Grab the next frame in the list, we may
4409
* be fiddling with the list.
4410
*/
4411
bf_next = TAILQ_NEXT(bf, bf_list);
4412
4413
/*
4414
* Free the frame and all subframes.
4415
*/
4416
ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4417
4418
/*
4419
* Next frame!
4420
*/
4421
bf = bf_next;
4422
}
4423
4424
/*
4425
* If there's anything in the hardware queue we wait
4426
* for the TID HWQ to empty.
4427
*/
4428
if (atid->hwq_depth > 0) {
4429
/*
4430
* XXX how about we kill atid->incomp, and instead
4431
* replace it with a macro that checks that atid->hwq_depth
4432
* is 0?
4433
*/
4434
atid->incomp = atid->hwq_depth;
4435
atid->cleanup_inprogress = 1;
4436
}
4437
4438
if (atid->cleanup_inprogress)
4439
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4440
"%s: TID %d: cleanup needed: %d packets\n",
4441
__func__, tid, atid->incomp);
4442
4443
/* Owner now must free completed frames */
4444
}
4445
4446
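/*
 * Note: the cleanup path above works by counting: incomp is seeded
 * with hwq_depth, and every completion of a hardware-queued frame
 * (ath_tx_comp_cleanup_unaggr(), ath_tx_comp_cleanup_aggr(), or the
 * normal completion path when cleanup_inprogress is set) decrements
 * it.  When incomp reaches zero, cleanup_inprogress is cleared and
 * the TID is resumed.
 */
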
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *bf)
{
struct ath_buf *nbf;
int error;

/*
* Clone the buffer. This will handle the dma unmap and
* copy the node reference to the new buffer. If this
* works out, 'bf' will have no DMA mapping, no mbuf
* pointer and no node reference.
*/
nbf = ath_buf_clone(sc, bf);

#if 0
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
__func__);
#endif

if (nbf == NULL) {
/* Failed to clone */
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: failed to clone a busy buffer\n",
__func__);
return NULL;
}

/* Setup the dma for the new buffer */
error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
if (error != 0) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: failed to setup dma for clone\n",
__func__);
/*
* Put this at the head of the list, not tail;
* that way it doesn't interfere with the
* busy buffer logic (which uses the tail of
* the list.)
*/
ATH_TXBUF_LOCK(sc);
ath_returnbuf_head(sc, nbf);
ATH_TXBUF_UNLOCK(sc);
return NULL;
}

/* Update BAW if required, before we free the original buf */
if (bf->bf_state.bfs_dobaw)
ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);

/* Free original buffer; return new buffer */
ath_freebuf(sc, bf);

return nbf;
}

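/*
 * Note: the retry paths below (ath_tx_aggr_retry_unaggr() and
 * ath_tx_retry_subframe()) share the same clone-on-busy pattern: if
 * the buffer is still marked ATH_BUF_BUSY it's cloned via
 * ath_tx_retry_clone() and the clone is retried in its place; if the
 * clone fails, bfs_retries is forced past SWMAX_RETRIES so the
 * "exceeded retries" path frees the buffer instead of requeueing it.
 */
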
/*
4503
* Handle retrying an unaggregate frame in an aggregate
4504
* session.
4505
*
4506
* If too many retries occur, pause the TID, wait for
4507
* any further retransmits (as there's no reason why
4508
* non-aggregate frames in an aggregate session are
4509
* transmitted in-order; they just have to be in-BAW)
4510
* and then queue a BAR.
4511
*/
4512
static void
4513
ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4514
{
4515
struct ieee80211_node *ni = bf->bf_node;
4516
struct ath_node *an = ATH_NODE(ni);
4517
int tid = bf->bf_state.bfs_tid;
4518
struct ath_tid *atid = &an->an_tid[tid];
4519
struct ieee80211_tx_ampdu *tap;
4520
4521
ATH_TX_LOCK(sc);
4522
4523
tap = ath_tx_get_tx_tid(an, tid);
4524
4525
/*
4526
* If the buffer is marked as busy, we can't directly
4527
* reuse it. Instead, try to clone the buffer.
4528
* If the clone is successful, recycle the old buffer.
4529
* If the clone is unsuccessful, set bfs_retries to max
4530
* to force the next bit of code to free the buffer
4531
* for us.
4532
*/
4533
if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4534
(bf->bf_flags & ATH_BUF_BUSY)) {
4535
struct ath_buf *nbf;
4536
nbf = ath_tx_retry_clone(sc, an, atid, bf);
4537
if (nbf)
4538
/* bf has been freed at this point */
4539
bf = nbf;
4540
else
4541
bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4542
}
4543
4544
if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4545
DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4546
"%s: exceeded retries; seqno %d\n",
4547
__func__, SEQNO(bf->bf_state.bfs_seqno));
4548
sc->sc_stats.ast_tx_swretrymax++;
4549
4550
/* Update BAW anyway */
4551
if (bf->bf_state.bfs_dobaw) {
4552
ath_tx_update_baw(sc, an, atid, bf);
4553
if (! bf->bf_state.bfs_addedbaw)
4554
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4555
"%s: wasn't added: seqno %d\n",
4556
__func__, SEQNO(bf->bf_state.bfs_seqno));
4557
}
4558
bf->bf_state.bfs_dobaw = 0;
4559
4560
/* Suspend the TX queue and get ready to send the BAR */
4561
ath_tx_tid_bar_suspend(sc, atid);
4562
4563
/* Send the BAR if there are no other frames waiting */
4564
if (ath_tx_tid_bar_tx_ready(sc, atid))
4565
ath_tx_tid_bar_tx(sc, atid);
4566
4567
ATH_TX_UNLOCK(sc);
4568
4569
/* Free buffer, bf is free after this call */
4570
ath_tx_default_comp(sc, bf, 0);
4571
return;
4572
}
4573
4574
/*
4575
* This increments the retry counter as well as
4576
* sets the retry flag in the ath_buf and packet
4577
* body.
4578
*/
4579
ath_tx_set_retry(sc, bf);
4580
sc->sc_stats.ast_tx_swretries++;
4581
4582
/*
4583
* Insert this at the head of the queue, so it's
4584
* retried before any current/subsequent frames.
4585
*/
4586
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4587
ath_tx_tid_sched(sc, atid);
4588
/* Send the BAR if there are no other frames waiting */
4589
if (ath_tx_tid_bar_tx_ready(sc, atid))
4590
ath_tx_tid_bar_tx(sc, atid);
4591
4592
ATH_TX_UNLOCK(sc);
4593
}
4594
4595
/*
4596
* Common code for aggregate excessive retry/subframe retry.
4597
* If retrying, queues buffers to bf_q. If not, frees the
4598
* buffers.
4599
*
4600
* XXX should unify this with ath_tx_aggr_retry_unaggr()
4601
*/
4602
static int
4603
ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4604
ath_bufhead *bf_q)
4605
{
4606
struct ieee80211_node *ni = bf->bf_node;
4607
struct ath_node *an = ATH_NODE(ni);
4608
int tid = bf->bf_state.bfs_tid;
4609
struct ath_tid *atid = &an->an_tid[tid];
4610
4611
ATH_TX_LOCK_ASSERT(sc);
4612
4613
/* XXX clr11naggr should be done for all subframes */
4614
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4615
ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4616
4617
/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4618
4619
/*
4620
* If the buffer is marked as busy, we can't directly
4621
* reuse it. Instead, try to clone the buffer.
4622
* If the clone is successful, recycle the old buffer.
4623
* If the clone is unsuccessful, set bfs_retries to max
4624
* to force the next bit of code to free the buffer
4625
* for us.
4626
*/
4627
if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4628
(bf->bf_flags & ATH_BUF_BUSY)) {
4629
struct ath_buf *nbf;
4630
nbf = ath_tx_retry_clone(sc, an, atid, bf);
4631
if (nbf)
4632
/* bf has been freed at this point */
4633
bf = nbf;
4634
else
4635
bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4636
}
4637
4638
if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4639
sc->sc_stats.ast_tx_swretrymax++;
4640
DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4641
"%s: max retries: seqno %d\n",
4642
__func__, SEQNO(bf->bf_state.bfs_seqno));
4643
ath_tx_update_baw(sc, an, atid, bf);
4644
if (!bf->bf_state.bfs_addedbaw)
4645
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4646
"%s: wasn't added: seqno %d\n",
4647
__func__, SEQNO(bf->bf_state.bfs_seqno));
4648
bf->bf_state.bfs_dobaw = 0;
4649
return 1;
4650
}
4651
4652
ath_tx_set_retry(sc, bf);
4653
sc->sc_stats.ast_tx_swretries++;
4654
bf->bf_next = NULL; /* Just to make sure */
4655
4656
/* Clear the aggregate state */
4657
bf->bf_state.bfs_aggr = 0;
4658
bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
4659
bf->bf_state.bfs_nframes = 1;
4660
4661
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4662
return 0;
4663
}
4664
4665
/*
4666
* error pkt completion for an aggregate destination
4667
*/
4668
static void
4669
ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4670
struct ath_tid *tid)
4671
{
4672
struct ieee80211_node *ni = bf_first->bf_node;
4673
struct ath_node *an = ATH_NODE(ni);
4674
struct ath_buf *bf_next, *bf;
4675
ath_bufhead bf_q;
4676
int drops = 0;
4677
struct ieee80211_tx_ampdu *tap;
4678
ath_bufhead bf_cq;
4679
4680
TAILQ_INIT(&bf_q);
4681
TAILQ_INIT(&bf_cq);
4682
4683
/*
4684
* Update rate control - all frames have failed.
4685
*/
4686
ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4687
&bf_first->bf_status.ds_txstat,
4688
bf_first->bf_state.bfs_al,
4689
bf_first->bf_state.bfs_rc_maxpktlen,
4690
bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4691
4692
ATH_TX_LOCK(sc);
4693
tap = ath_tx_get_tx_tid(an, tid->tid);
4694
sc->sc_stats.ast_tx_aggr_failall++;
4695
4696
/* Retry all subframes */
4697
bf = bf_first;
4698
while (bf) {
4699
bf_next = bf->bf_next;
4700
bf->bf_next = NULL; /* Remove it from the aggr list */
4701
sc->sc_stats.ast_tx_aggr_fail++;
4702
if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4703
drops++;
4704
bf->bf_next = NULL;
4705
TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4706
}
4707
bf = bf_next;
4708
}
4709
4710
/* Prepend all frames to the beginning of the queue */
4711
while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4712
TAILQ_REMOVE(&bf_q, bf, bf_list);
4713
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4714
}
4715
4716
/*
4717
* Schedule the TID to be re-tried.
4718
*/
4719
ath_tx_tid_sched(sc, tid);
4720
4721
/*
4722
* send bar if we dropped any frames
4723
*
4724
* Keep the txq lock held for now, as we need to ensure
4725
* that ni_txseqs[] is consistent (as it's being updated
4726
* in the ifnet TX context or raw TX context.)
4727
*/
4728
if (drops) {
4729
/* Suspend the TX queue and get ready to send the BAR */
4730
ath_tx_tid_bar_suspend(sc, tid);
4731
}
4732
4733
/*
4734
* Send BAR if required
4735
*/
4736
if (ath_tx_tid_bar_tx_ready(sc, tid))
4737
ath_tx_tid_bar_tx(sc, tid);
4738
4739
ATH_TX_UNLOCK(sc);
4740
4741
/* Complete frames which errored out */
4742
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4743
TAILQ_REMOVE(&bf_cq, bf, bf_list);
4744
ath_tx_default_comp(sc, bf, 0);
4745
}
4746
}
4747
4748
/*
4749
* Handle clean-up of packets from an aggregate list.
4750
*
4751
* There's no need to update the BAW here - the session is being
4752
* torn down.
4753
*/
4754
static void
4755
ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4756
{
4757
struct ath_buf *bf, *bf_next;
4758
struct ieee80211_node *ni = bf_first->bf_node;
4759
struct ath_node *an = ATH_NODE(ni);
4760
int tid = bf_first->bf_state.bfs_tid;
4761
struct ath_tid *atid = &an->an_tid[tid];
4762
4763
ATH_TX_LOCK(sc);
4764
4765
/* update incomp */
4766
atid->incomp--;
4767
4768
/* Update the BAW */
4769
bf = bf_first;
4770
while (bf) {
4771
/* XXX refactor! */
4772
if (bf->bf_state.bfs_dobaw) {
4773
ath_tx_update_baw(sc, an, atid, bf);
4774
if (!bf->bf_state.bfs_addedbaw)
4775
DPRINTF(sc, ATH_DEBUG_SW_TX,
4776
"%s: wasn't added: seqno %d\n",
4777
__func__, SEQNO(bf->bf_state.bfs_seqno));
4778
}
4779
bf = bf->bf_next;
4780
}
4781
4782
if (atid->incomp == 0) {
4783
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4784
"%s: TID %d: cleaned up! resume!\n",
4785
__func__, tid);
4786
atid->cleanup_inprogress = 0;
4787
ath_tx_tid_resume(sc, atid);
4788
}
4789
4790
/* Send BAR if required */
4791
/* XXX why would we send a BAR when transitioning to non-aggregation? */
4792
/*
4793
* XXX TODO: we should likely just tear down the BAR state here,
4794
* rather than sending a BAR.
4795
*/
4796
if (ath_tx_tid_bar_tx_ready(sc, atid))
4797
ath_tx_tid_bar_tx(sc, atid);
4798
4799
ATH_TX_UNLOCK(sc);
4800
4801
/* Handle frame completion as individual frames */
4802
bf = bf_first;
4803
while (bf) {
4804
bf_next = bf->bf_next;
4805
bf->bf_next = NULL;
4806
ath_tx_default_comp(sc, bf, 1);
4807
bf = bf_next;
4808
}
4809
}
4810
4811
/*
4812
* Handle completion of a set of aggregate frames.
4813
*
4814
* Note: the completion handler is the last descriptor in the aggregate,
4815
* not the last descriptor in the first frame.
4816
*/
4817
static void
4818
ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4819
int fail)
4820
{
4821
//struct ath_desc *ds = bf->bf_lastds;
4822
struct ieee80211_node *ni = bf_first->bf_node;
4823
struct ath_node *an = ATH_NODE(ni);
4824
int tid = bf_first->bf_state.bfs_tid;
4825
struct ath_tid *atid = &an->an_tid[tid];
4826
struct ath_tx_status ts;
4827
struct ieee80211_tx_ampdu *tap;
4828
ath_bufhead bf_q;
4829
ath_bufhead bf_cq;
4830
int seq_st, tx_ok;
4831
int hasba, isaggr;
4832
uint32_t ba[2];
4833
struct ath_buf *bf, *bf_next;
4834
int ba_index;
4835
int drops = 0;
4836
int nframes = 0, nbad = 0, nf;
4837
int pktlen;
4838
int agglen, rc_agglen;
4839
/* XXX there's too much on the stack? */
4840
struct ath_rc_series rc[ATH_RC_NUM];
4841
int txseq;
4842
4843
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4844
__func__, atid->hwq_depth);
4845
4846
/*
4847
* Take a copy; this may be needed -after- bf_first
4848
* has been completed and freed.
4849
*/
4850
ts = bf_first->bf_status.ds_txstat;
4851
agglen = bf_first->bf_state.bfs_al;
4852
rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4853
4854
TAILQ_INIT(&bf_q);
4855
TAILQ_INIT(&bf_cq);
4856
4857
/* The TID state is kept behind the TXQ lock */
4858
ATH_TX_LOCK(sc);
4859
4860
atid->hwq_depth--;
4861
if (atid->hwq_depth < 0)
4862
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4863
__func__, atid->hwq_depth);
4864
4865
/*
4866
* If the TID is filtered, handle completing the filter
4867
* transition before potentially kicking it to the cleanup
4868
* function.
4869
*
4870
* XXX this is duplicate work, ew.
4871
*/
4872
if (atid->isfiltered)
4873
ath_tx_tid_filt_comp_complete(sc, atid);
4874
4875
/*
4876
* Punt cleanup to the relevant function, not our problem now
4877
*/
4878
if (atid->cleanup_inprogress) {
4879
if (atid->isfiltered)
4880
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4881
"%s: isfiltered=1, normal_comp?\n",
4882
__func__);
4883
ATH_TX_UNLOCK(sc);
4884
ath_tx_comp_cleanup_aggr(sc, bf_first);
4885
return;
4886
}
4887
4888
/*
4889
* If the frame is filtered, transition to filtered frame
4890
* mode and add this to the filtered frame list.
4891
*
4892
* XXX TODO: figure out how this interoperates with
4893
* BAR, pause and cleanup states.
4894
*/
4895
if ((ts.ts_status & HAL_TXERR_FILT) ||
4896
(ts.ts_status != 0 && atid->isfiltered)) {
4897
if (fail != 0)
4898
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4899
"%s: isfiltered=1, fail=%d\n", __func__, fail);
4900
ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4901
4902
/* Remove from BAW */
4903
TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4904
if (bf->bf_state.bfs_addedbaw)
4905
drops++;
4906
if (bf->bf_state.bfs_dobaw) {
4907
ath_tx_update_baw(sc, an, atid, bf);
4908
if (!bf->bf_state.bfs_addedbaw)
4909
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4910
"%s: wasn't added: seqno %d\n",
4911
__func__,
4912
SEQNO(bf->bf_state.bfs_seqno));
4913
}
4914
bf->bf_state.bfs_dobaw = 0;
4915
}
4916
/*
4917
* If any intermediate frames in the BAW were dropped when
4918
* handling filtering things, send a BAR.
4919
*/
4920
if (drops)
4921
ath_tx_tid_bar_suspend(sc, atid);
4922
4923
/*
4924
* Finish up by sending a BAR if required and freeing
4925
* the frames outside of the TX lock.
4926
*/
4927
goto finish_send_bar;
4928
}
4929
4930
/*
4931
* XXX for now, use the first frame in the aggregate for
4932
* XXX rate control completion; it's at least consistent.
4933
*/
4934
pktlen = bf_first->bf_state.bfs_pktlen;
4935
4936
/*
4937
* Handle errors first!
4938
*
4939
* Here, handle _any_ error as a "exceeded retries" error.
4940
* Later on (when filtered frames are to be specially handled)
4941
* it'll have to be expanded.
4942
*/
4943
#if 0
4944
if (ts.ts_status & HAL_TXERR_XRETRY) {
4945
#endif
4946
if (ts.ts_status != 0) {
4947
ATH_TX_UNLOCK(sc);
4948
ath_tx_comp_aggr_error(sc, bf_first, atid);
4949
return;
4950
}
4951
4952
tap = ath_tx_get_tx_tid(an, tid);
4953
4954
/*
4955
* extract starting sequence and block-ack bitmap
4956
*/
4957
/* XXX endian-ness of seq_st, ba? */
4958
seq_st = ts.ts_seqnum;
4959
hasba = !! (ts.ts_flags & HAL_TX_BA);
4960
tx_ok = (ts.ts_status == 0);
4961
isaggr = bf_first->bf_state.bfs_aggr;
4962
ba[0] = ts.ts_ba_low;
4963
ba[1] = ts.ts_ba_high;
4964
4965
/*
4966
* Copy the TX completion status and the rate control
4967
* series from the first descriptor, as it may be freed
4968
* before the rate control code can get its grubby fingers
4969
* into things.
4970
*/
4971
memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4972
4973
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4974
"%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4975
"isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4976
__func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4977
isaggr, seq_st, hasba, ba[0], ba[1]);
4978
4979
/*
4980
* The reference driver doesn't do this; it simply ignores
4981
* this check in its entirety.
4982
*
4983
* I've seen this occur when using iperf to send traffic
4984
* out tid 1 - the aggregate frames are all marked as TID 1,
4985
* but the TXSTATUS has TID=0. So, let's just ignore this
4986
* check.
4987
*/
4988
#if 0
4989
/* Occasionally, the MAC sends a tx status for the wrong TID. */
4990
if (tid != ts.ts_tid) {
4991
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4992
__func__, tid, ts.ts_tid);
4993
tx_ok = 0;
4994
}
4995
#endif
4996
4997
/* AR5416 BA bug; this requires an interface reset */
4998
if (isaggr && tx_ok && (! hasba)) {
4999
device_printf(sc->sc_dev,
5000
"%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
5001
"seq_st=%d\n",
5002
__func__, hasba, tx_ok, isaggr, seq_st);
5003
taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
5004
/* And as we can't really trust the BA here .. */
5005
ba[0] = 0;
5006
ba[1] = 0;
5007
seq_st = 0;
5008
#ifdef ATH_DEBUG
5009
ath_printtxbuf(sc, bf_first,
5010
sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5011
#endif
5012
}
5013
5014
/*
5015
* Walk the list of frames, figure out which ones were correctly
5016
* sent and which weren't.
5017
*/
5018
bf = bf_first;
5019
nf = bf_first->bf_state.bfs_nframes;
5020
5021
/* bf_first is going to be invalid once this list is walked */
5022
bf_first = NULL;
5023
5024
/*
5025
* Walk the list of completed frames and determine
5026
* which need to be completed and which need to be
5027
* retransmitted.
5028
*
5029
* For completed frames, the completion functions need
5030
* to be called at the end of this function as the last
5031
* node reference may free the node.
5032
*
5033
* Finally, since the TXQ lock can't be held during the
5034
* completion callback (to avoid lock recursion),
5035
* the completion calls have to be done outside of the
5036
* lock.
5037
*/
5038
while (bf) {
5039
nframes++;
5040
ba_index = ATH_BA_INDEX(seq_st,
5041
SEQNO(bf->bf_state.bfs_seqno));
5042
bf_next = bf->bf_next;
5043
bf->bf_next = NULL; /* Remove it from the aggr list */
5044
5045
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5046
"%s: checking bf=%p seqno=%d; ack=%d\n",
5047
__func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5048
ATH_BA_ISSET(ba, ba_index));
5049
5050
if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
5051
sc->sc_stats.ast_tx_aggr_ok++;
5052
ath_tx_update_baw(sc, an, atid, bf);
5053
bf->bf_state.bfs_dobaw = 0;
5054
if (!bf->bf_state.bfs_addedbaw)
5055
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5056
"%s: wasn't added: seqno %d\n",
5057
__func__, SEQNO(bf->bf_state.bfs_seqno));
5058
bf->bf_next = NULL;
5059
TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5060
} else {
5061
sc->sc_stats.ast_tx_aggr_fail++;
5062
if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5063
drops++;
5064
bf->bf_next = NULL;
5065
TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5066
}
5067
nbad++;
5068
}
5069
bf = bf_next;
5070
}
5071
5072
/*
5073
* Now that the BAW updates have been done, unlock
5074
*
5075
* txseq is grabbed before the lock is released so we
5076
* have a consistent view of what -was- in the BAW.
5077
* Anything after this point will not yet have been
5078
* TXed.
5079
*/
5080
txseq = tap->txa_start;
5081
ATH_TX_UNLOCK(sc);
5082
5083
if (nframes != nf)
5084
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5085
"%s: num frames seen=%d; bf nframes=%d\n",
5086
__func__, nframes, nf);
5087
5088
/*
5089
* Now we know how many frames were bad, call the rate
5090
* control code.
5091
*/
5092
if (fail == 0) {
5093
ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen,
5094
nframes, nbad);
5095
}
5096
5097
/*
5098
* send bar if we dropped any frames
5099
*/
5100
if (drops) {
5101
/* Suspend the TX queue and get ready to send the BAR */
5102
ATH_TX_LOCK(sc);
5103
ath_tx_tid_bar_suspend(sc, atid);
5104
ATH_TX_UNLOCK(sc);
5105
}
5106
5107
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5108
"%s: txa_start now %d\n", __func__, tap->txa_start);
5109
5110
ATH_TX_LOCK(sc);
5111
5112
/* Prepend all frames to the beginning of the queue */
5113
while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
5114
TAILQ_REMOVE(&bf_q, bf, bf_list);
5115
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5116
}
5117
5118
/*
5119
* Reschedule to grab some further frames.
5120
*/
5121
ath_tx_tid_sched(sc, atid);
5122
5123
/*
5124
* If the queue is filtered, re-schedule as required.
5125
*
5126
* This is required as there may be a subsequent TX descriptor
5127
* for this end-node that has CLRDMASK set, so it's quite possible
5128
* that a filtered frame will be followed by a non-filtered
5129
* (complete or otherwise) frame.
5130
*
5131
* XXX should we do this before we complete the frame?
5132
*/
5133
if (atid->isfiltered)
5134
ath_tx_tid_filt_comp_complete(sc, atid);
5135
5136
finish_send_bar:
5137
5138
/*
5139
* Send BAR if required
5140
*/
5141
if (ath_tx_tid_bar_tx_ready(sc, atid))
5142
ath_tx_tid_bar_tx(sc, atid);
5143
5144
ATH_TX_UNLOCK(sc);
5145
5146
/* Do deferred completion */
5147
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5148
TAILQ_REMOVE(&bf_cq, bf, bf_list);
5149
ath_tx_default_comp(sc, bf, 0);
5150
}
5151
}
5152
5153
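/*
 * Note: the per-subframe walk in ath_tx_aggr_comp_aggr() above maps
 * each subframe's sequence number into the block-ack bitmap with
 * ATH_BA_INDEX(seq_st, seqno).  Subframes whose bit is set are
 * completed and removed from the BAW; the rest are retried via
 * ath_tx_retry_subframe() and prepended, in order, back onto the
 * software queue before the TID is rescheduled.
 */
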
/*
5154
* Handle completion of unaggregated frames in an ADDBA
5155
* session.
5156
*
5157
* Fail is set to 1 if the entry is being freed via a call to
5158
* ath_tx_draintxq().
5159
*/
5160
static void
5161
ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5162
{
5163
struct ieee80211_node *ni = bf->bf_node;
5164
struct ath_node *an = ATH_NODE(ni);
5165
int tid = bf->bf_state.bfs_tid;
5166
struct ath_tid *atid = &an->an_tid[tid];
5167
struct ath_tx_status ts;
5168
int drops = 0;
5169
5170
/*
5171
* Take a copy of this; filtering/cloning the frame may free the
5172
* bf pointer.
5173
*/
5174
ts = bf->bf_status.ds_txstat;
5175
5176
/*
5177
* Update rate control status here, before we possibly
5178
* punt to retry or cleanup.
5179
*
5180
* Do it outside of the TXQ lock.
5181
*/
5182
if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5183
ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5184
&bf->bf_status.ds_txstat,
5185
bf->bf_state.bfs_pktlen,
5186
bf->bf_state.bfs_pktlen,
5187
1, (ts.ts_status == 0) ? 0 : 1);
5188
5189
/*
5190
* This is called early so atid->hwq_depth can be tracked.
5191
* This unfortunately means that it's released and regrabbed
5192
* during retry and cleanup. That's rather inefficient.
5193
*/
5194
ATH_TX_LOCK(sc);
5195
5196
if (tid == IEEE80211_NONQOS_TID)
5197
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5198
5199
DPRINTF(sc, ATH_DEBUG_SW_TX,
5200
"%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5201
__func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5202
SEQNO(bf->bf_state.bfs_seqno));
5203
5204
atid->hwq_depth--;
5205
if (atid->hwq_depth < 0)
5206
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5207
__func__, atid->hwq_depth);
5208
5209
/*
5210
* If the TID is filtered, handle completing the filter
5211
* transition before potentially kicking it to the cleanup
5212
* function.
5213
*/
5214
if (atid->isfiltered)
5215
ath_tx_tid_filt_comp_complete(sc, atid);
5216
5217
/*
5218
* If a cleanup is in progress, punt to comp_cleanup;
5219
* rather than handling it here. It's thus their
5220
* responsibility to clean up, call the completion
5221
* function in net80211, etc.
5222
*/
5223
if (atid->cleanup_inprogress) {
5224
if (atid->isfiltered)
5225
DPRINTF(sc, ATH_DEBUG_SW_TX,
5226
"%s: isfiltered=1, normal_comp?\n",
5227
__func__);
5228
ATH_TX_UNLOCK(sc);
5229
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5230
__func__);
5231
ath_tx_comp_cleanup_unaggr(sc, bf);
5232
return;
5233
}
5234
5235
/*
5236
* XXX TODO: how does cleanup, BAR and filtered frame handling
5237
* overlap?
5238
*
5239
* If the frame is filtered OR if it's any failure but
5240
* the TID is filtered, the frame must be added to the
5241
* filtered frame list.
5242
*
5243
* However - a busy buffer can't be added to the filtered
5244
* list as it will end up being recycled without having
5245
* been made available for the hardware.
5246
*/
5247
if ((ts.ts_status & HAL_TXERR_FILT) ||
5248
(ts.ts_status != 0 && atid->isfiltered)) {
5249
int freeframe;
5250
5251
if (fail != 0)
5252
DPRINTF(sc, ATH_DEBUG_SW_TX,
5253
"%s: isfiltered=1, fail=%d\n",
5254
__func__, fail);
5255
freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5256
/*
5257
* If freeframe=0 then bf is no longer ours; don't
5258
* touch it.
5259
*/
5260
if (freeframe) {
5261
/* Remove from BAW */
5262
if (bf->bf_state.bfs_addedbaw)
5263
drops++;
5264
if (bf->bf_state.bfs_dobaw) {
5265
ath_tx_update_baw(sc, an, atid, bf);
5266
if (!bf->bf_state.bfs_addedbaw)
5267
DPRINTF(sc, ATH_DEBUG_SW_TX,
5268
"%s: wasn't added: seqno %d\n",
5269
__func__, SEQNO(bf->bf_state.bfs_seqno));
5270
}
5271
bf->bf_state.bfs_dobaw = 0;
5272
}
5273
5274
/*
5275
* If the frame couldn't be filtered, treat it as a drop and
5276
* prepare to send a BAR.
5277
*/
5278
if (freeframe && drops)
5279
ath_tx_tid_bar_suspend(sc, atid);
5280
5281
/*
5282
* Send BAR if required
5283
*/
5284
if (ath_tx_tid_bar_tx_ready(sc, atid))
5285
ath_tx_tid_bar_tx(sc, atid);
5286
5287
ATH_TX_UNLOCK(sc);
5288
/*
5289
* If freeframe is set, then the frame couldn't be
5290
* cloned and bf is still valid. Just complete/free it.
5291
*/
5292
if (freeframe)
5293
ath_tx_default_comp(sc, bf, fail);
5294
5295
return;
5296
}
5297
/*
5298
* Don't bother with the retry check if all frames
5299
* are being failed (eg during queue deletion.)
5300
*/
5301
#if 0
5302
if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5303
#endif
5304
if (fail == 0 && ts.ts_status != 0) {
5305
ATH_TX_UNLOCK(sc);
5306
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5307
__func__);
5308
ath_tx_aggr_retry_unaggr(sc, bf);
5309
return;
5310
}
5311
5312
/* Success? Complete */
5313
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5314
__func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5315
if (bf->bf_state.bfs_dobaw) {
5316
ath_tx_update_baw(sc, an, atid, bf);
5317
bf->bf_state.bfs_dobaw = 0;
5318
if (!bf->bf_state.bfs_addedbaw)
5319
DPRINTF(sc, ATH_DEBUG_SW_TX,
5320
"%s: wasn't added: seqno %d\n",
5321
__func__, SEQNO(bf->bf_state.bfs_seqno));
5322
}
5323
5324
/*
5325
* If the queue is filtered, re-schedule as required.
5326
*
5327
* This is required as there may be a subsequent TX descriptor
5328
* for this end-node that has CLRDMASK set, so it's quite possible
5329
* that a filtered frame will be followed by a non-filtered
5330
* (complete or otherwise) frame.
5331
*
5332
* XXX should we do this before we complete the frame?
5333
*/
5334
if (atid->isfiltered)
5335
ath_tx_tid_filt_comp_complete(sc, atid);
5336
5337
/*
5338
* Send BAR if required
5339
*/
5340
if (ath_tx_tid_bar_tx_ready(sc, atid))
5341
ath_tx_tid_bar_tx(sc, atid);
5342
5343
ATH_TX_UNLOCK(sc);
5344
5345
ath_tx_default_comp(sc, bf, fail);
5346
/* bf is freed at this point */
5347
}
5348
5349
void
5350
ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5351
{
5352
if (bf->bf_state.bfs_aggr)
5353
ath_tx_aggr_comp_aggr(sc, bf, fail);
5354
else
5355
ath_tx_aggr_comp_unaggr(sc, bf, fail);
5356
}
5357
5358
/*
5359
* Grab the software queue depth that we COULD transmit.
5360
*
5361
* This includes checking whether each frame is within the BAW and
* whether it's supposed to be in the BAW at all. Other checks could be done;
5363
* but for now let's try and avoid doing the whole of ath_tx_form_aggr()
5364
* here.
5365
*/
5366
static int
5367
ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an,
5368
struct ath_tid *tid)
5369
{
5370
struct ath_buf *bf;
5371
struct ieee80211_tx_ampdu *tap;
5372
int nbytes = 0;
5373
5374
ATH_TX_LOCK_ASSERT(sc);
5375
5376
tap = ath_tx_get_tx_tid(an, tid->tid);
5377
5378
/*
5379
* Iterate over each buffer and sum the pkt_len.
5380
* Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't
5381
* ever queue more than that in a single frame.
5382
*/
5383
TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5384
/*
5385
* TODO: I'm not sure if we're going to hit cases where
5386
* no frames get sent because the list is empty.
5387
*/
5388
5389
/* Check if it's in the BAW */
5390
if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5391
SEQNO(bf->bf_state.bfs_seqno)))) {
5392
break;
5393
}
5394
5395
/* Check if it's even supposed to be in the BAW */
5396
if (! bf->bf_state.bfs_dobaw) {
5397
break;
5398
}
5399
5400
nbytes += bf->bf_state.bfs_pktlen;
5401
if (nbytes >= ATH_AGGR_MAXSIZE)
5402
break;
5403
5404
/*
5405
* Check if we're likely going to leak a frame
5406
* as part of a PSPOLL. Break out at this point;
5407
* we're only going to send a single frame anyway.
5408
*/
5409
if (an->an_leak_count) {
5410
break;
5411
}
5412
}
5413
5414
return MIN(nbytes, ATH_AGGR_MAXSIZE);
5415
}
5416
5417
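/*
 * Note: the byte estimate from ath_tx_tid_swq_depth_bytes() is used
 * to seed the rate control lookup in ath_tx_tid_hw_queue_aggr() (via
 * ath_tx_do_ratelookup()), so the chosen rate reflects the likely
 * aggregate size rather than the length of the first frame; it's
 * clamped to ATH_AGGR_MAXSIZE.
 */
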
/*
5418
* Schedule some packets from the given node/TID to the hardware.
5419
*
5420
* This is the aggregate version.
5421
*/
5422
void
5423
ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5424
struct ath_tid *tid)
5425
{
5426
struct ath_buf *bf;
5427
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5428
struct ieee80211_tx_ampdu *tap;
5429
ATH_AGGR_STATUS status;
5430
ath_bufhead bf_q;
5431
int swq_pktbytes;
5432
5433
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5434
ATH_TX_LOCK_ASSERT(sc);
5435
5436
/*
5437
* XXX TODO: If we're called for a queue that we're leaking frames to,
5438
* ensure we only leak one.
5439
*/
5440
5441
tap = ath_tx_get_tx_tid(an, tid->tid);
5442
5443
if (tid->tid == IEEE80211_NONQOS_TID)
5444
DPRINTF(sc, ATH_DEBUG_SW_TX,
5445
"%s: called for TID=NONQOS_TID?\n", __func__);
5446
5447
for (;;) {
5448
status = ATH_AGGR_DONE;
5449
5450
/*
5451
* If the upper layer has paused the TID, don't
5452
* queue any further packets.
5453
*
5454
* This can also occur from the completion task because
5455
* of packet loss; but as it's serialised with this code,
5456
* it won't "appear" half way through queuing packets.
5457
*/
5458
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5459
break;
5460
5461
bf = ATH_TID_FIRST(tid);
5462
if (bf == NULL) {
5463
break;
5464
}
5465
5466
/*
5467
* If the packet doesn't fall within the BAW (eg a NULL
5468
* data frame), schedule it directly; continue.
5469
*/
5470
if (! bf->bf_state.bfs_dobaw) {
5471
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5472
"%s: non-baw packet\n",
5473
__func__);
5474
ATH_TID_REMOVE(tid, bf, bf_list);
5475
5476
if (bf->bf_state.bfs_nframes > 1)
5477
DPRINTF(sc, ATH_DEBUG_SW_TX,
5478
"%s: aggr=%d, nframes=%d\n",
5479
__func__,
5480
bf->bf_state.bfs_aggr,
5481
bf->bf_state.bfs_nframes);
5482
5483
/*
5484
* This shouldn't happen - such frames shouldn't
5485
* ever have been queued as an aggregate in the
5486
* first place. However, make sure the fields
5487
* are correctly setup just to be totally sure.
5488
*/
5489
bf->bf_state.bfs_aggr = 0;
5490
bf->bf_state.bfs_nframes = 1;
5491
5492
/* Update CLRDMASK just before this frame is queued */
5493
ath_tx_update_clrdmask(sc, tid, bf);
5494
5495
ath_tx_do_ratelookup(sc, bf, tid->tid,
5496
bf->bf_state.bfs_pktlen, false);
5497
ath_tx_calc_duration(sc, bf);
5498
ath_tx_calc_protection(sc, bf);
5499
ath_tx_set_rtscts(sc, bf);
5500
ath_tx_rate_fill_rcflags(sc, bf);
5501
ath_tx_setds(sc, bf);
5502
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5503
5504
sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5505
5506
/* Queue the packet; continue */
5507
goto queuepkt;
5508
}
5509
5510
TAILQ_INIT(&bf_q);
5511
5512
/*
5513
* Loop over the swq to find out how long
5514
* each packet is (up until 64k) and provide that
5515
* to the rate control lookup.
5516
*/
5517
swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid);
5518
ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5519
5520
/*
5521
* Note this only is used for the fragment paths and
5522
* should really be rethought out if we want to do
5523
* things like an RTS burst across >1 aggregate.
5524
*/
5525
ath_tx_calc_duration(sc, bf);
5526
ath_tx_calc_protection(sc, bf);
5527
5528
ath_tx_set_rtscts(sc, bf);
5529
ath_tx_rate_fill_rcflags(sc, bf);
5530
5531
status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5532
5533
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5534
"%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5535
5536
/*
5537
* No frames to be picked up - out of BAW
5538
*/
5539
if (TAILQ_EMPTY(&bf_q))
5540
break;
5541
5542
/*
5543
* This assumes that the descriptor list in the ath_bufhead
5544
* are already linked together via bf_next pointers.
5545
*/
5546
bf = TAILQ_FIRST(&bf_q);
5547
5548
if (status == ATH_AGGR_8K_LIMITED)
5549
sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5550
5551
/*
5552
* If it's the only frame send as non-aggregate
5553
* assume that ath_tx_form_aggr() has checked
5554
* whether it's in the BAW and added it appropriately.
5555
*/
5556
if (bf->bf_state.bfs_nframes == 1) {
5557
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5558
"%s: single-frame aggregate\n", __func__);
5559
5560
/* Update CLRDMASK just before this frame is queued */
5561
ath_tx_update_clrdmask(sc, tid, bf);
5562
5563
bf->bf_state.bfs_aggr = 0;
5564
bf->bf_state.bfs_ndelim = 0;
5565
ath_tx_setds(sc, bf);
5566
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5567
if (status == ATH_AGGR_BAW_CLOSED)
5568
sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5569
else
5570
sc->sc_aggr_stats.aggr_single_pkt++;
5571
} else {
5572
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5573
"%s: multi-frame aggregate: %d frames, "
5574
"length %d\n",
5575
__func__, bf->bf_state.bfs_nframes,
5576
bf->bf_state.bfs_al);
5577
bf->bf_state.bfs_aggr = 1;
5578
sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5579
sc->sc_aggr_stats.aggr_aggr_pkt++;
5580
5581
/* Update CLRDMASK just before this frame is queued */
5582
ath_tx_update_clrdmask(sc, tid, bf);
5583
5584
/*
5585
* Calculate the duration/protection as required.
5586
*/
5587
ath_tx_calc_duration(sc, bf);
5588
ath_tx_calc_protection(sc, bf);
5589
5590
/*
5591
* Update the rate and rtscts information based on the
5592
* rate decision made by the rate control code;
5593
* the first frame in the aggregate needs it.
5594
*/
5595
ath_tx_set_rtscts(sc, bf);
5596
5597
/*
5598
* Setup the relevant descriptor fields
5599
* for aggregation. The first descriptor
5600
* already points to the rest in the chain.
5601
*/
5602
ath_tx_setds_11n(sc, bf);
5603
}
5604
queuepkt:
5605
/* Set completion handler, multi-frame aggregate or not */
5606
bf->bf_comp = ath_tx_aggr_comp;
5607
5608
if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5609
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5610
5611
/*
5612
* Update leak count and frame config if were leaking frames.
5613
*
5614
* XXX TODO: it should update all frames in an aggregate
5615
* correctly!
5616
*/
5617
ath_tx_leak_count_update(sc, tid, bf);
5618
5619
/* Punt to txq */
5620
ath_tx_handoff(sc, txq, bf);
5621
5622
/* Track outstanding buffer count to hardware */
5623
/* aggregates are "one" buffer */
5624
tid->hwq_depth++;
5625
5626
/*
5627
* Break out if ath_tx_form_aggr() indicated
5628
* there can't be any further progress (eg BAW is full.)
5629
* Checking for an empty txq is done above.
5630
*
5631
* XXX locking on txq here?
5632
*/
5633
/* XXX TXQ locking */
5634
if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5635
(status == ATH_AGGR_BAW_CLOSED ||
5636
status == ATH_AGGR_LEAK_CLOSED))
5637
break;
5638
}
5639
}
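
/*
 * Editor's illustrative sketch of the aggregate-forming decision made in the
 * loop above: pull frames from a software queue up to a byte budget, and let
 * the caller treat a single resulting frame as "send non-aggregate" rather
 * than a one-frame A-MPDU.  All names and the byte budget below are
 * hypothetical and exist only for this standalone, compiled-out example.
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

struct ex_frame {
    int len;
    TAILQ_ENTRY(ex_frame) link;
};
TAILQ_HEAD(ex_frameq, ex_frame);

#define EX_AGGR_MAXLEN  65535   /* example byte budget only */

/*
 * Move frames from swq to aggr until the budget is spent; return how many
 * frames were moved.  A return value of 1 means "send as a plain frame".
 */
static int
ex_form_aggr(struct ex_frameq *swq, struct ex_frameq *aggr)
{
    struct ex_frame *f;
    int bytes = 0, nframes = 0;

    while ((f = TAILQ_FIRST(swq)) != NULL) {
        /* Always take the first frame; stop before exceeding the budget. */
        if (nframes > 0 && bytes + f->len > EX_AGGR_MAXLEN)
            break;
        TAILQ_REMOVE(swq, f, link);
        TAILQ_INSERT_TAIL(aggr, f, link);
        bytes += f->len;
        nframes++;
    }
    return (nframes);
}
#endif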

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
 * It just dumps frames into the TXQ. We should limit how deep
 * the transmit queue can grow for frames dispatched to the given
 * TXQ.
 *
 * To avoid locking issues, either we need to own the TXQ lock
 * at this point, or we need to pass in the maximum frame count
 * from the caller.
 */
void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
    struct ath_buf *bf;
    struct ath_txq *txq = sc->sc_ac2q[tid->ac];

    DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
        __func__, an, tid->tid);

    ATH_TX_LOCK_ASSERT(sc);

    /* Check - is AMPDU pending or running? If so, print out something */
    if (ath_tx_ampdu_pending(sc, an, tid->tid))
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
            __func__, tid->tid);
    if (ath_tx_ampdu_running(sc, an, tid->tid))
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
            __func__, tid->tid);

    for (;;) {
        /*
         * If the upper layers have paused the TID, don't
         * queue any further packets.
         *
         * XXX if we are leaking frames, make sure we decrement
         * that counter _and_ we continue here.
         */
        if (! ath_tx_tid_can_tx_or_sched(sc, tid))
            break;

        bf = ATH_TID_FIRST(tid);
        if (bf == NULL) {
            break;
        }

        ATH_TID_REMOVE(tid, bf, bf_list);

        /* Sanity check! */
        if (tid->tid != bf->bf_state.bfs_tid) {
            DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
                " tid %d\n", __func__, bf->bf_state.bfs_tid,
                tid->tid);
        }
        /* Normal completion handler */
        bf->bf_comp = ath_tx_normal_comp;

        /*
         * Override this for now, until the non-aggregate
         * completion handler correctly handles software retransmits.
         */
        bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

        /* Update CLRDMASK just before this frame is queued */
        ath_tx_update_clrdmask(sc, tid, bf);

        /* Program descriptors + rate control */
        ath_tx_do_ratelookup(sc, bf, tid->tid,
            bf->bf_state.bfs_pktlen, false);
        ath_tx_calc_duration(sc, bf);
        ath_tx_calc_protection(sc, bf);
        ath_tx_set_rtscts(sc, bf);
        ath_tx_rate_fill_rcflags(sc, bf);
        ath_tx_setds(sc, bf);

        /*
         * Update the current leak count if
         * we're leaking frames; and set the
         * MORE flag as appropriate.
         */
        ath_tx_leak_count_update(sc, tid, bf);

        /* Track outstanding buffer count to hardware */
        /* aggregates are "one" buffer */
        tid->hwq_depth++;

        /* Punt to hardware or software txq */
        ath_tx_handoff(sc, txq, bf);
    }
}

/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (ie, ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_tid *tid, *next, *last;

    ATH_TX_LOCK_ASSERT(sc);

    /*
     * For non-EDMA chips, aggr frames that have been built are
     * in axq_aggr_depth, whether they've been scheduled or not.
     * There's no FIFO, so txq->axq_depth is what's been scheduled
     * to the hardware.
     *
     * For EDMA chips, we do it in two stages. The existing code
     * builds a list of frames to go to the hardware and the EDMA
     * code turns it into a single entry to push into the FIFO.
     * That way we don't take up one packet per FIFO slot.
     * We do push one aggregate per FIFO slot though, just to keep
     * things simple.
     *
     * The FIFO depth is what's in the hardware; the txq->axq_depth
     * is what's been scheduled to the FIFO.
     *
     * fifo.axq_depth is the number of frames (or aggregates) pushed
     * into the EDMA FIFO. For multi-frame lists, this is the number
     * of frames pushed in.
     * axq_fifo_depth is the number of FIFO slots currently busy.
     */

    /* For EDMA and non-EDMA, check built/scheduled against aggr limit */
    if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
        sc->sc_aggr_stats.aggr_sched_nopkt++;
        return;
    }

    /*
     * For non-EDMA chips, axq_depth is what's been scheduled to
     * the hardware list. For EDMA it's what's been built for the
     * hardware, and fifo.axq_depth is how many frames have already
     * been dispatched to the hardware.
     */
    if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
        sc->sc_aggr_stats.aggr_sched_nopkt++;
        return;
    }

    last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

    TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
        /*
         * Suspend paused queues here; they'll be resumed
         * once the addba completes or times out.
         */
        DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
            __func__, tid->tid, tid->paused);
        ath_tx_tid_unsched(sc, tid);
        /*
         * This node may be in power-save and we're leaking
         * a frame; be careful.
         */
        if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
            goto loop_done;
        }
        if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
            ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
        else
            ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

        /* Not empty? Re-schedule */
        if (tid->axq_depth != 0)
            ath_tx_tid_sched(sc, tid);

        /*
         * Give the software queue time to aggregate more
         * packets. If we aren't running aggregation then
         * we should still limit the hardware queue depth.
         */
        /* XXX TXQ locking */
        if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
            break;
        }
        if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
            break;
        }
    loop_done:
        /*
         * If this was the last entry on the original list, stop.
         * Otherwise nodes that have been rescheduled onto the end
         * of the TID FIFO list will just keep being rescheduled.
         *
         * XXX What should we do about nodes that were paused
         * but are awaiting a leaked frame in response to a ps-poll?
         * They'll be put at the front of the list; so they'll
         * prematurely trigger this condition! Ew.
         */
        if (tid == last)
            break;
    }
}
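
/*
 * Editor's illustrative sketch of the FIFO TID scheduling pattern used by
 * ath_txq_sched() above: snapshot the current tail, walk the list,
 * unschedule each entry, service it, re-append it if it still has backlog,
 * and stop once the snapshotted tail has been visited so re-appended entries
 * aren't walked again in the same pass.  All names below (ex_tid, ex_sched,
 * ...) are hypothetical and exist only for this standalone, compiled-out
 * example; it uses <sys/queue.h> TAILQ macros, not driver structures.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct ex_tid {
    int id;
    int backlog;                        /* frames still queued in software */
    TAILQ_ENTRY(ex_tid) link;
};
TAILQ_HEAD(ex_tidq, ex_tid);

/* Service one TID: pretend to push up to two frames to "hardware". */
static void
ex_service(struct ex_tid *t)
{
    int n = t->backlog < 2 ? t->backlog : 2;

    t->backlog -= n;
    printf("tid %d: pushed %d, %d left\n", t->id, n, t->backlog);
}

static void
ex_sched(struct ex_tidq *q)
{
    struct ex_tid *t, *next, *last;

    last = TAILQ_LAST(q, ex_tidq);
    TAILQ_FOREACH_SAFE(t, q, link, next) {
        TAILQ_REMOVE(q, t, link);       /* "unschedule" */
        ex_service(t);
        if (t->backlog != 0)            /* still busy? back to the tail */
            TAILQ_INSERT_TAIL(q, t, link);
        if (t == last)                  /* original tail seen; stop */
            break;
    }
}
#endif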

/*
 * TX addba handling
 */

/*
 * Return net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
    struct ieee80211_node *ni = &an->an_node;
    struct ieee80211_tx_ampdu *tap;

    if (tid == IEEE80211_NONQOS_TID)
        return NULL;

    tap = &ni->ni_tx_ampdu[tid];
    return tap;
}

/*
 * Is AMPDU-TX running?
 */
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
    struct ieee80211_tx_ampdu *tap;

    if (tid == IEEE80211_NONQOS_TID)
        return 0;

    tap = ath_tx_get_tx_tid(an, tid);
    if (tap == NULL)
        return 0;       /* Not valid; default to not running */

    return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}

/*
 * Is AMPDU-TX negotiation pending?
 */
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
    struct ieee80211_tx_ampdu *tap;

    if (tid == IEEE80211_NONQOS_TID)
        return 0;

    tap = ath_tx_get_tx_tid(an, tid);
    if (tap == NULL)
        return 0;       /* Not valid; default to not pending */

    return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}

/*
 * Is AMPDU-TX pending for the given TID?
 */

/*
 * Method to handle sending an ADDBA request.
 *
 * We tap this so the relevant flags can be set to pause the TID
 * whilst waiting for the response.
 *
 * XXX there's no timeout handler we can override?
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
    struct ath_softc *sc = ni->ni_ic->ic_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];

    /*
     * XXX danger Will Robinson!
     *
     * Although the taskqueue may be running and scheduling some more
     * packets, these should all be _before_ the addba sequence number.
     * However, net80211 will keep self-assigning sequence numbers
     * until addba has been negotiated.
     *
     * In the past, these packets would be "paused" (which still works
     * fine, as they're being scheduled to the driver in the same
     * serialised method which is calling the addba request routine)
     * and when the aggregation session begins, they'll be dequeued
     * as aggregate packets and added to the BAW. However, now there's
     * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
     * packets. Thus they never get included in the BAW tracking and
     * this can cause the initial burst of packets after the addba
     * negotiation to "hang", as they quickly fall outside the BAW.
     *
     * The "eventual" solution should be to tag these packets with
     * dobaw. Although net80211 has given us a sequence number,
     * it'll be "after" the left edge of the BAW and thus it'll
     * fall within it.
     */
    ATH_TX_LOCK(sc);
    /*
     * This is a bit annoying. Until net80211 HT code inherits some
     * (any) locking, we may have this called in parallel BUT only
     * one response/timeout will be called. Grr.
     */
    if (atid->addba_tx_pending == 0) {
        ath_tx_tid_pause(sc, atid);
        atid->addba_tx_pending = 1;
    }
    ATH_TX_UNLOCK(sc);

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
        __func__,
        ni->ni_macaddr,
        ":",
        dialogtoken, baparamset, batimeout);
    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: txa_start=%d, ni_txseqs=%d\n",
        __func__, tap->txa_start, ni->ni_txseqs[tid]);

    return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
        batimeout);
}

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should be marked "aggregate" (whether
 * they're actually aggregated or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated. This means the initially-negotiated BAW left
 * edge won't match the ni->ni_txseq.
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseq.
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW. For now though, I'll just slide the
 * window.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
    struct ath_softc *sc = ni->ni_ic->ic_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];
    int r;

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
        ni->ni_macaddr,
        ":",
        status, code, batimeout);

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: txa_start=%d, ni_txseqs=%d\n",
        __func__, tap->txa_start, ni->ni_txseqs[tid]);

    /*
     * Call this first, so the interface flags get updated
     * before the TID is unpaused. Otherwise a race condition
     * exists where the unpaused TID still doesn't yet have
     * IEEE80211_AGGR_RUNNING set.
     */
    r = sc->sc_addba_response(ni, tap, status, code, batimeout);

    ATH_TX_LOCK(sc);
    atid->addba_tx_pending = 0;
    /*
     * XXX dirty!
     * Slide the BAW left edge to wherever net80211 left it for us.
     * Read above for more information.
     */
    tap->txa_start = ni->ni_txseqs[tid];
    ath_tx_tid_resume(sc, atid);
    ATH_TX_UNLOCK(sc);
    return r;
}
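
/*
 * Editor's illustrative sketch of the "slide the BAW left edge" idea applied
 * by ath_addba_response() above: 802.11 sequence numbers live in a 12-bit
 * space (0..4095), so window membership must be computed modulo 4096, and a
 * stale left edge can leave freshly assigned sequence numbers outside the
 * window until the edge is moved up to the current sequence number.  The
 * helper names and window size below are hypothetical and exist only for
 * this standalone, compiled-out example.
 */
#if 0
#include <stdio.h>

#define EX_SEQ_RANGE    4096            /* 12-bit 802.11 sequence space */
#define EX_SEQ_MASK     (EX_SEQ_RANGE - 1)
#define EX_BAW_SIZE     64              /* example window size only */

/* Distance from the window's left edge to seqno, modulo the seq space. */
static int
ex_seq_sub(int seqno, int left_edge)
{
    return ((seqno - left_edge + EX_SEQ_RANGE) & EX_SEQ_MASK);
}

/* Is seqno inside a window of EX_BAW_SIZE frames starting at left_edge? */
static int
ex_baw_contains(int left_edge, int seqno)
{
    return (ex_seq_sub(seqno, left_edge) < EX_BAW_SIZE);
}

static void
ex_demo(void)
{
    int left_edge = 4000;               /* stale left edge */
    int next_seq = 100;                 /* where the TX sequence actually is */

    /* (100 - 4000) mod 4096 = 196, which is >= 64: outside the window. */
    printf("in window before slide: %d\n",
        ex_baw_contains(left_edge, next_seq));

    /* Slide the left edge up to the current sequence number. */
    left_edge = next_seq;
    printf("in window after slide: %d\n",
        ex_baw_contains(left_edge, next_seq));
}
#endif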

/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
    struct ath_softc *sc = ni->ni_ic->ic_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];
    ath_bufhead bf_cq;
    struct ath_buf *bf;

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
        __func__,
        ni->ni_macaddr,
        ":");

    /*
     * Pause TID traffic early, so there aren't any races.
     * Unblock the pending BAR-held traffic, if it's currently paused.
     */
    ATH_TX_LOCK(sc);
    ath_tx_tid_pause(sc, atid);
    if (atid->bar_wait) {
        /*
         * bar_unsuspend() expects bar_tx == 1, as it should be
         * called from the TX completion path. This quietens
         * the warning. It's cleared for us anyway.
         */
        atid->bar_tx = 1;
        ath_tx_tid_bar_unsuspend(sc, atid);
    }
    ATH_TX_UNLOCK(sc);

    /* There's no need to hold the TXQ lock here */
    sc->sc_addba_stop(ni, tap);

    /*
     * ath_tx_tid_cleanup will resume the TID if possible, otherwise
     * it'll set the cleanup flag, and it'll be unpaused once
     * things have been cleaned up.
     */
    TAILQ_INIT(&bf_cq);
    ATH_TX_LOCK(sc);

    /*
     * In case there's a followup call to this, only call it
     * if we don't have a cleanup in progress.
     *
     * Since we've paused the queue above, we need to make
     * sure we unpause if there's already a cleanup in
     * progress - it means something else is also doing
     * this stuff, so we don't need to also keep it paused.
     */
    if (atid->cleanup_inprogress) {
        ath_tx_tid_resume(sc, atid);
    } else {
        ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
        /*
         * Unpause the TID if no cleanup is required.
         */
        if (! atid->cleanup_inprogress)
            ath_tx_tid_resume(sc, atid);
    }
    ATH_TX_UNLOCK(sc);

    /* Handle completing frames and fail them */
    while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
        TAILQ_REMOVE(&bf_cq, bf, bf_list);
        ath_tx_default_comp(sc, bf, 1);
    }

}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
 */
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_tid *tid;
    int i;
    ath_bufhead bf_cq;
    struct ath_buf *bf;

    TAILQ_INIT(&bf_cq);

    ATH_TX_UNLOCK_ASSERT(sc);

    ATH_TX_LOCK(sc);
    for (i = 0; i < IEEE80211_TID_SIZE; i++) {
        tid = &an->an_tid[i];
        if (tid->hwq_depth == 0)
            continue;
        DPRINTF(sc, ATH_DEBUG_NODE,
            "%s: %6D: TID %d: cleaning up TID\n",
            __func__,
            an->an_node.ni_macaddr,
            ":",
            i);
        /*
         * In case there's a followup call to this, only call it
         * if we don't have a cleanup in progress.
         */
        if (! tid->cleanup_inprogress) {
            ath_tx_tid_pause(sc, tid);
            ath_tx_tid_cleanup(sc, an, i, &bf_cq);
            /*
             * Unpause the TID if no cleanup is required.
             */
            if (! tid->cleanup_inprogress)
                ath_tx_tid_resume(sc, tid);
        }
    }
    ATH_TX_UNLOCK(sc);

    /* Handle completing frames and fail them */
    while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
        TAILQ_REMOVE(&bf_cq, bf, bf_list);
        ath_tx_default_comp(sc, bf, 1);
    }
}

/*
 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
 * it simply tears down the aggregation session. Ew.
 *
 * It however will call ieee80211_ampdu_stop() which will call
 * ic->ic_addba_stop().
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
    struct ath_softc *sc = ni->ni_ic->ic_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];
    int attempts = tap->txa_attempts;
    int old_txa_start;

    DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
        "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
        __func__,
        ni->ni_macaddr,
        ":",
        tap->txa_tid,
        atid->tid,
        status,
        attempts,
        tap->txa_start,
        tap->txa_seqpending);

    /* Note: This may update the BAW details */
    /*
     * XXX What if this does slide the BAW along? We need to somehow
     * XXX either fix things when it does happen, or prevent the
     * XXX seqpending value from being anything other than exactly what
     * XXX the hell we want!
     *
     * XXX So for now, how about we do this inside the TX lock
     * XXX and just correct it afterwards? The below condition should
     * XXX never happen and if it does I need to fix all kinds of things.
     */
    ATH_TX_LOCK(sc);
    old_txa_start = tap->txa_start;
    sc->sc_bar_response(ni, tap, status);
    if (tap->txa_start != old_txa_start) {
        device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
            __func__,
            tid,
            tap->txa_start,
            old_txa_start);
    }
    tap->txa_start = old_txa_start;
    ATH_TX_UNLOCK(sc);

    /* Unpause the TID */
    /*
     * XXX if this is attempt=50, the TID will be downgraded
     * XXX to a non-aggregate session. So we must unpause the
     * XXX TID here or it'll never be done.
     *
     * Also, don't call it if bar_tx/bar_wait are 0; something
     * has beaten us to the punch? (XXX figure out what?)
     */
    if (status == 0 || attempts == 50) {
        ATH_TX_LOCK(sc);
        if (atid->bar_tx == 0 || atid->bar_wait == 0)
            DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
                "%s: huh? bar_tx=%d, bar_wait=%d\n",
                __func__,
                atid->bar_tx, atid->bar_wait);
        else
            ath_tx_tid_bar_unsuspend(sc, atid);
        ATH_TX_UNLOCK(sc);
    }
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
    struct ath_softc *sc = ni->ni_ic->ic_softc;
    int tid = tap->txa_tid;
    struct ath_node *an = ATH_NODE(ni);
    struct ath_tid *atid = &an->an_tid[tid];

    DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
        "%s: %6D: TID=%d, called; resuming\n",
        __func__,
        ni->ni_macaddr,
        ":",
        tid);

    ATH_TX_LOCK(sc);
    atid->addba_tx_pending = 0;
    ATH_TX_UNLOCK(sc);

    /* Note: This updates the aggregate state to (again) pending */
    sc->sc_addba_response_timeout(ni, tap);

    /* Unpause the TID; which reschedules it */
    ATH_TX_LOCK(sc);
    ath_tx_tid_resume(sc, atid);
    ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not.
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

    ATH_TX_LOCK_ASSERT(sc);

    return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works. Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held. This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations. There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_tid *atid;
    struct ath_txq *txq;
    int tid;

    ATH_TX_UNLOCK_ASSERT(sc);

    /* Suspend all traffic on the node */
    ATH_TX_LOCK(sc);

    if (an->an_is_powersave) {
        DPRINTF(sc, ATH_DEBUG_XMIT,
            "%s: %6D: node was already asleep!\n",
            __func__, an->an_node.ni_macaddr, ":");
        ATH_TX_UNLOCK(sc);
        return;
    }

    for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
        atid = &an->an_tid[tid];
        txq = sc->sc_ac2q[atid->ac];

        ath_tx_tid_pause(sc, atid);
    }

    /* Mark node as in powersaving */
    an->an_is_powersave = 1;

    ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_tid *atid;
    struct ath_txq *txq;
    int tid;

    ATH_TX_UNLOCK_ASSERT(sc);

    ATH_TX_LOCK(sc);

    /* !? */
    if (an->an_is_powersave == 0) {
        ATH_TX_UNLOCK(sc);
        DPRINTF(sc, ATH_DEBUG_XMIT,
            "%s: an=%p: node was already awake\n",
            __func__, an);
        return;
    }

    /* Mark node as awake */
    an->an_is_powersave = 0;
    /*
     * Clear any pending leaked frame requests
     */
    an->an_leak_count = 0;

    for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
        atid = &an->an_tid[tid];
        txq = sc->sc_ac2q[atid->ac];

        ath_tx_tid_resume(sc, atid);
    }
    ATH_TX_UNLOCK(sc);
}
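
/*
 * Editor's illustrative sketch of the counted pause/resume pattern that the
 * power-save handling above leans on: each pause increments a counter, each
 * resume decrements it, and traffic may only flow again once every pauser
 * has resumed.  The type and function names here are hypothetical and this
 * standalone example is compiled out; the driver's real bookkeeping lives in
 * ath_tx_tid_pause()/ath_tx_tid_resume(), which are not part of this excerpt.
 */
#if 0
#include <assert.h>
#include <stdio.h>

struct ex_queue {
    int paused;         /* how many callers currently hold it paused */
    int backlog;        /* queued frames */
};

static void
ex_pause(struct ex_queue *q)
{
    q->paused++;
}

static void
ex_resume(struct ex_queue *q)
{
    assert(q->paused > 0);
    q->paused--;
    if (q->paused == 0 && q->backlog > 0)
        printf("unpaused with %d frames; kick the scheduler\n", q->backlog);
}

static void
ex_demo(void)
{
    struct ex_queue q = { 0, 5 };

    ex_pause(&q);       /* e.g. node goes to sleep */
    ex_pause(&q);       /* e.g. ADDBA exchange in flight */
    ex_resume(&q);      /* ADDBA finished; still paused for sleep */
    ex_resume(&q);      /* node woke up; now the queue can run */
}
#endif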

static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

    /* nothing new needed */
    return (0);
}

static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

    /* nothing new needed */
    return (0);
}

void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
    /*
     * For now, just set the descriptor length to sizeof(ath_desc);
     * worry about extracting the real length out of the HAL later.
     */
    sc->sc_tx_desclen = sizeof(struct ath_desc);
    sc->sc_tx_statuslen = sizeof(struct ath_desc);
    sc->sc_tx_nmaps = 1;        /* only one buffer per TX desc */

    sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
    sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
    sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

    sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
    sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

    sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
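
/*
 * Editor's illustrative sketch of the method-table pattern used by
 * ath_xmit_setup_legacy() above: the attach path fills a struct of function
 * pointers once, and the rest of the code calls through them without caring
 * which variant was installed.  The names below are hypothetical and this
 * standalone example is compiled out.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct ex_xmit_methods {
    int  (*xmit_setup)(void *softc);
    void (*xmit_handoff)(void *softc, int frame_id);
};

static int
ex_legacy_setup(void *softc)
{
    (void)softc;
    printf("legacy setup\n");
    return (0);
}

static void
ex_legacy_handoff(void *softc, int frame_id)
{
    (void)softc;
    printf("legacy handoff of frame %d\n", frame_id);
}

/* Install the "legacy" implementations into the dispatch table. */
static void
ex_xmit_setup_legacy(struct ex_xmit_methods *m)
{
    m->xmit_setup = ex_legacy_setup;
    m->xmit_handoff = ex_legacy_handoff;
}

static void
ex_demo(void)
{
    struct ex_xmit_methods m;

    ex_xmit_setup_legacy(&m);
    if (m.xmit_setup(NULL) == 0)
        m.xmit_handoff(NULL, 1);
}
#endif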