/*-1* SPDX-License-Identifier: BSD-2-Clause2*3* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting4* Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd5* All rights reserved.6*7* Redistribution and use in source and binary forms, with or without8* modification, are permitted provided that the following conditions9* are met:10* 1. Redistributions of source code must retain the above copyright11* notice, this list of conditions and the following disclaimer,12* without modification.13* 2. Redistributions in binary form must reproduce at minimum a disclaimer14* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any15* redistribution must be conditioned upon including a substantially16* similar Disclaimer requirement for further binary redistribution.17*18* NO WARRANTY19* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS20* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT21* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY22* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL23* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,24* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF25* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS26* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER27* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)28* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF29* THE POSSIBILITY OF SUCH DAMAGES.30*/3132#include <sys/cdefs.h>33/*34* Driver for the Atheros Wireless LAN controller.35*36* This software is derived from work of Atsushi Onoe; his contribution37* is greatly appreciated.38*/3940#include "opt_inet.h"41#include "opt_ath.h"42#include "opt_wlan.h"4344#include <sys/param.h>45#include <sys/systm.h>46#include <sys/sysctl.h>47#include <sys/mbuf.h>48#include <sys/malloc.h>49#include <sys/lock.h>50#include <sys/mutex.h>51#include <sys/kernel.h>52#include <sys/socket.h>53#include <sys/sockio.h>54#include <sys/errno.h>55#include <sys/callout.h>56#include <sys/bus.h>57#include <sys/endian.h>58#include <sys/kthread.h>59#include <sys/taskqueue.h>60#include <sys/priv.h>61#include <sys/ktr.h>6263#include <machine/bus.h>6465#include <net/if.h>66#include <net/if_var.h>67#include <net/if_dl.h>68#include <net/if_media.h>69#include <net/if_types.h>70#include <net/if_arp.h>71#include <net/ethernet.h>72#include <net/if_llc.h>7374#include <net80211/ieee80211_var.h>75#include <net80211/ieee80211_regdomain.h>76#ifdef IEEE80211_SUPPORT_SUPERG77#include <net80211/ieee80211_superg.h>78#endif79#ifdef IEEE80211_SUPPORT_TDMA80#include <net80211/ieee80211_tdma.h>81#endif82#include <net80211/ieee80211_ht.h>8384#include <net/bpf.h>8586#ifdef INET87#include <netinet/in.h>88#include <netinet/if_ether.h>89#endif9091#include <dev/ath/if_athvar.h>92#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */93#include <dev/ath/ath_hal/ah_diagcodes.h>9495#include <dev/ath/if_ath_debug.h>9697#ifdef ATH_TX99_DIAG98#include <dev/ath/ath_tx99/ath_tx99.h>99#endif100101#include <dev/ath/if_ath_misc.h>102#include <dev/ath/if_ath_tx.h>103#include <dev/ath/if_ath_tx_ht.h>104105#ifdef ATH_DEBUG_ALQ106#include <dev/ath/if_ath_alq.h>107#endif108109/*110* How many retries to perform in software111*/112#define SWMAX_RETRIES 10113114/*115* What queue to throw the non-QoS TID traffic into116*/117#define ATH_NONQOS_TID_AC WME_AC_VO118119#if 0120static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);121#endif122static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,123int tid);124static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,125int tid);126static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,127struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);128static int ath_tx_action_frame_override_queue(struct ath_softc *sc,129struct ieee80211_node *ni, struct mbuf *m0, int *tid);130static struct ath_buf *131ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,132struct ath_tid *tid, struct ath_buf *bf);133134#ifdef ATH_DEBUG_ALQ135void136ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)137{138struct ath_buf *bf;139int i, n;140const char *ds;141142/* XXX we should skip out early if debugging isn't enabled! */143bf = bf_first;144145while (bf != NULL) {146/* XXX should ensure bf_nseg > 0! */147if (bf->bf_nseg == 0)148break;149n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;150for (i = 0, ds = (const char *) bf->bf_desc;151i < n;152i++, ds += sc->sc_tx_desclen) {153if_ath_alq_post(&sc->sc_alq,154ATH_ALQ_EDMA_TXDESC,155sc->sc_tx_desclen,156ds);157}158bf = bf->bf_next;159}160}161#endif /* ATH_DEBUG_ALQ */162163/*164* Whether to use the 11n rate scenario functions or not165*/166static inline int167ath_tx_is_11n(struct ath_softc *sc)168{169return ((sc->sc_ah->ah_magic == 0x20065416) ||170(sc->sc_ah->ah_magic == 0x19741014));171}172173/*174* Obtain the current TID from the given frame.175*176* Non-QoS frames get mapped to a TID so frames consistently177* go on a sensible queue.178*/179static int180ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)181{182const struct ieee80211_frame *wh;183184wh = mtod(m0, const struct ieee80211_frame *);185186/* Non-QoS: map frame to a TID queue for software queueing */187if (! IEEE80211_QOS_HAS_SEQ(wh))188return (WME_AC_TO_TID(M_WME_GETAC(m0)));189190/* QoS - fetch the TID from the header, ignore mbuf WME */191return (ieee80211_gettid(wh));192}193194static void195ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)196{197struct ieee80211_frame *wh;198199wh = mtod(bf->bf_m, struct ieee80211_frame *);200/* Only update/resync if needed */201if (bf->bf_state.bfs_isretried == 0) {202wh->i_fc[1] |= IEEE80211_FC1_RETRY;203bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,204BUS_DMASYNC_PREWRITE);205}206bf->bf_state.bfs_isretried = 1;207bf->bf_state.bfs_retries ++;208}209210/*211* Determine what the correct AC queue for the given frame212* should be.213*214* For QoS frames, obey the TID. That way things like215* management frames that are related to a given TID216* are thus serialised with the rest of the TID traffic,217* regardless of net80211 overriding priority.218*219* For non-QoS frames, return the mbuf WMI priority.220*221* This has implications that higher priority non-QoS traffic222* may end up being scheduled before other non-QoS traffic,223* leading to out-of-sequence packets being emitted.224*225* (It'd be nice to log/count this so we can see if it226* really is a problem.)227*228* TODO: maybe we should throw multicast traffic, QoS or229* otherwise, into a separate TX queue?230*/231static int232ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)233{234const struct ieee80211_frame *wh;235236wh = mtod(m0, const struct ieee80211_frame *);237238/*239* QoS data frame (sequence number or otherwise) -240* return hardware queue mapping for the underlying241* TID.242*/243if (IEEE80211_QOS_HAS_SEQ(wh))244return TID_TO_WME_AC(ieee80211_gettid(wh));245246/*247* Otherwise - return mbuf QoS pri.248*/249return (M_WME_GETAC(m0));250}251252void253ath_txfrag_cleanup(struct ath_softc *sc,254ath_bufhead *frags, struct ieee80211_node *ni)255{256struct ath_buf *bf, *next;257258ATH_TXBUF_LOCK_ASSERT(sc);259260TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {261/* NB: bf assumed clean */262TAILQ_REMOVE(frags, bf, bf_list);263ath_returnbuf_head(sc, bf);264ieee80211_node_decref(ni);265}266}267268/*269* Setup xmit of a fragmented frame. Allocate a buffer270* for each frag and bump the node reference count to271* reflect the held reference to be setup by ath_tx_start.272*/273int274ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,275struct mbuf *m0, struct ieee80211_node *ni)276{277struct mbuf *m;278struct ath_buf *bf;279280ATH_TXBUF_LOCK(sc);281for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {282/* XXX non-management? */283bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);284if (bf == NULL) { /* out of buffers, cleanup */285DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",286__func__);287ath_txfrag_cleanup(sc, frags, ni);288break;289}290(void) ieee80211_ref_node(ni);291TAILQ_INSERT_TAIL(frags, bf, bf_list);292}293ATH_TXBUF_UNLOCK(sc);294295return !TAILQ_EMPTY(frags);296}297298static int299ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)300{301struct mbuf *m;302int error;303304/*305* Load the DMA map so any coalescing is done. This306* also calculates the number of descriptors we need.307*/308error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,309bf->bf_segs, &bf->bf_nseg,310BUS_DMA_NOWAIT);311if (error == EFBIG) {312/* XXX packet requires too many descriptors */313bf->bf_nseg = ATH_MAX_SCATTER + 1;314} else if (error != 0) {315sc->sc_stats.ast_tx_busdma++;316ieee80211_free_mbuf(m0);317return error;318}319/*320* Discard null packets and check for packets that321* require too many TX descriptors. We try to convert322* the latter to a cluster.323*/324if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */325sc->sc_stats.ast_tx_linear++;326m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);327if (m == NULL) {328ieee80211_free_mbuf(m0);329sc->sc_stats.ast_tx_nombuf++;330return ENOMEM;331}332m0 = m;333error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,334bf->bf_segs, &bf->bf_nseg,335BUS_DMA_NOWAIT);336if (error != 0) {337sc->sc_stats.ast_tx_busdma++;338ieee80211_free_mbuf(m0);339return error;340}341KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,342("too many segments after defrag; nseg %u", bf->bf_nseg));343} else if (bf->bf_nseg == 0) { /* null packet, discard */344sc->sc_stats.ast_tx_nodata++;345ieee80211_free_mbuf(m0);346return EIO;347}348DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",349__func__, m0, m0->m_pkthdr.len);350bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);351bf->bf_m = m0;352353return 0;354}355356/*357* Chain together segments+descriptors for a frame - 11n or otherwise.358*359* For aggregates, this is called on each frame in the aggregate.360*/361static void362ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,363struct ath_buf *bf, bool is_aggr, int is_first_subframe,364int is_last_subframe)365{366struct ath_hal *ah = sc->sc_ah;367char *ds;368int i, bp, dsp;369HAL_DMA_ADDR bufAddrList[4];370uint32_t segLenList[4];371int numTxMaps = 1;372int isFirstDesc = 1;373374/*375* XXX There's txdma and txdma_mgmt; the descriptor376* sizes must match.377*/378struct ath_descdma *dd = &sc->sc_txdma;379380/*381* Fillin the remainder of the descriptor info.382*/383384/*385* We need the number of TX data pointers in each descriptor.386* EDMA and later chips support 4 TX buffers per descriptor;387* previous chips just support one.388*/389numTxMaps = sc->sc_tx_nmaps;390391/*392* For EDMA and later chips ensure the TX map is fully populated393* before advancing to the next descriptor.394*/395ds = (char *) bf->bf_desc;396bp = dsp = 0;397bzero(bufAddrList, sizeof(bufAddrList));398bzero(segLenList, sizeof(segLenList));399for (i = 0; i < bf->bf_nseg; i++) {400bufAddrList[bp] = bf->bf_segs[i].ds_addr;401segLenList[bp] = bf->bf_segs[i].ds_len;402bp++;403404/*405* Go to the next segment if this isn't the last segment406* and there's space in the current TX map.407*/408if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))409continue;410411/*412* Last segment or we're out of buffer pointers.413*/414bp = 0;415416if (i == bf->bf_nseg - 1)417ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);418else419ath_hal_settxdesclink(ah, (struct ath_desc *) ds,420bf->bf_daddr + dd->dd_descsize * (dsp + 1));421422/*423* XXX This assumes that bfs_txq is the actual destination424* hardware queue at this point. It may not have been425* assigned, it may actually be pointing to the multicast426* software TXQ id. These must be fixed!427*/428ath_hal_filltxdesc(ah, (struct ath_desc *) ds429, bufAddrList430, segLenList431, bf->bf_descid /* XXX desc id */432, bf->bf_state.bfs_tx_queue433, isFirstDesc /* first segment */434, i == bf->bf_nseg - 1 /* last segment */435, (struct ath_desc *) ds0 /* first descriptor */436);437438/*439* Make sure the 11n aggregate fields are cleared.440*441* XXX TODO: this doesn't need to be called for442* aggregate frames; as it'll be called on all443* sub-frames. Since the descriptors are in444* non-cacheable memory, this leads to some445* rather slow writes on MIPS/ARM platforms.446*/447if (ath_tx_is_11n(sc))448ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);449450/*451* If 11n is enabled, set it up as if it's an aggregate452* frame.453*/454if (is_last_subframe) {455ath_hal_set11n_aggr_last(sc->sc_ah,456(struct ath_desc *) ds);457} else if (is_aggr) {458/*459* This clears the aggrlen field; so460* the caller needs to call set_aggr_first()!461*462* XXX TODO: don't call this for the first463* descriptor in the first frame in an464* aggregate!465*/466ath_hal_set11n_aggr_middle(sc->sc_ah,467(struct ath_desc *) ds,468bf->bf_state.bfs_ndelim);469}470isFirstDesc = 0;471bf->bf_lastds = (struct ath_desc *) ds;472473/*474* Don't forget to skip to the next descriptor.475*/476ds += sc->sc_tx_desclen;477dsp++;478479/*480* .. and don't forget to blank these out!481*/482bzero(bufAddrList, sizeof(bufAddrList));483bzero(segLenList, sizeof(segLenList));484}485bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);486}487488/*489* Set the rate control fields in the given descriptor based on490* the bf_state fields and node state.491*492* The bfs fields should already be set with the relevant rate493* control information, including whether MRR is to be enabled.494*495* Since the FreeBSD HAL currently sets up the first TX rate496* in ath_hal_setuptxdesc(), this will setup the MRR497* conditionally for the pre-11n chips, and call ath_buf_set_rate498* unconditionally for 11n chips. These require the 11n rate499* scenario to be set if MCS rates are enabled, so it's easier500* to just always call it. The caller can then only set rates 2, 3501* and 4 if multi-rate retry is needed.502*/503static void504ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,505struct ath_buf *bf)506{507struct ath_rc_series *rc = bf->bf_state.bfs_rc;508509/* If mrr is disabled, blank tries 1, 2, 3 */510if (! bf->bf_state.bfs_ismrr)511rc[1].tries = rc[2].tries = rc[3].tries = 0;512513#if 0514/*515* If NOACK is set, just set ntries=1.516*/517else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {518rc[1].tries = rc[2].tries = rc[3].tries = 0;519rc[0].tries = 1;520}521#endif522523/*524* Always call - that way a retried descriptor will525* have the MRR fields overwritten.526*527* XXX TODO: see if this is really needed - setting up528* the first descriptor should set the MRR fields to 0529* for us anyway.530*/531if (ath_tx_is_11n(sc)) {532ath_buf_set_rate(sc, ni, bf);533} else {534ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc535, rc[1].ratecode, rc[1].tries536, rc[2].ratecode, rc[2].tries537, rc[3].ratecode, rc[3].tries538);539}540}541542/*543* Setup segments+descriptors for an 11n aggregate.544* bf_first is the first buffer in the aggregate.545* The descriptor list must already been linked together using546* bf->bf_next.547*/548static void549ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)550{551struct ath_buf *bf, *bf_prev = NULL;552struct ath_desc *ds0 = bf_first->bf_desc;553554DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",555__func__, bf_first->bf_state.bfs_nframes,556bf_first->bf_state.bfs_al);557558bf = bf_first;559560if (bf->bf_state.bfs_txrate0 == 0)561DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",562__func__, bf, 0);563if (bf->bf_state.bfs_rc[0].ratecode == 0)564DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",565__func__, bf, 0);566567/*568* Setup all descriptors of all subframes - this will569* call ath_hal_set11naggrmiddle() on every frame.570*/571while (bf != NULL) {572DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,573"%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",574__func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,575SEQNO(bf->bf_state.bfs_seqno));576577/*578* Setup the initial fields for the first descriptor - all579* the non-11n specific stuff.580*/581ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc582, bf->bf_state.bfs_pktlen /* packet length */583, bf->bf_state.bfs_hdrlen /* header length */584, bf->bf_state.bfs_atype /* Atheros packet type */585, bf->bf_state.bfs_txpower /* txpower */586, bf->bf_state.bfs_txrate0587, bf->bf_state.bfs_try0 /* series 0 rate/tries */588, bf->bf_state.bfs_keyix /* key cache index */589, bf->bf_state.bfs_txantenna /* antenna mode */590, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */591, bf->bf_state.bfs_ctsrate /* rts/cts rate */592, bf->bf_state.bfs_ctsduration /* rts/cts duration */593);594595/*596* First descriptor? Setup the rate control and initial597* aggregate header information.598*/599if (bf == bf_first) {600/*601* setup first desc with rate and aggr info602*/603ath_tx_set_ratectrl(sc, bf->bf_node, bf);604}605606/*607* Setup the descriptors for a multi-descriptor frame.608* This is both aggregate and non-aggregate aware.609*/610ath_tx_chaindesclist(sc, ds0, bf,6111, /* is_aggr */612!! (bf == bf_first), /* is_first_subframe */613!! (bf->bf_next == NULL) /* is_last_subframe */614);615616if (bf == bf_first) {617/*618* Initialise the first 11n aggregate with the619* aggregate length and aggregate enable bits.620*/621ath_hal_set11n_aggr_first(sc->sc_ah,622ds0,623bf->bf_state.bfs_al,624bf->bf_state.bfs_ndelim);625}626627/*628* Link the last descriptor of the previous frame629* to the beginning descriptor of this frame.630*/631if (bf_prev != NULL)632ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,633bf->bf_daddr);634635/* Save a copy so we can link the next descriptor in */636bf_prev = bf;637bf = bf->bf_next;638}639640/*641* Set the first descriptor bf_lastds field to point to642* the last descriptor in the last subframe, that's where643* the status update will occur.644*/645bf_first->bf_lastds = bf_prev->bf_lastds;646647/*648* And bf_last in the first descriptor points to the end of649* the aggregate list.650*/651bf_first->bf_last = bf_prev;652653/*654* For non-AR9300 NICs, which require the rate control655* in the final descriptor - let's set that up now.656*657* This is because the filltxdesc() HAL call doesn't658* populate the last segment with rate control information659* if firstSeg is also true. For non-aggregate frames660* that is fine, as the first frame already has rate control661* info. But if the last frame in an aggregate has one662* descriptor, both firstseg and lastseg will be true and663* the rate info isn't copied.664*665* This is inefficient on MIPS/ARM platforms that have666* non-cachable memory for TX descriptors, but we'll just667* make do for now.668*669* As to why the rate table is stashed in the last descriptor670* rather than the first descriptor? Because proctxdesc()671* is called on the final descriptor in an MPDU or A-MPDU -672* ie, the one that gets updated by the hardware upon673* completion. That way proctxdesc() doesn't need to know674* about the first _and_ last TX descriptor.675*/676ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);677678DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);679}680681/*682* Hand-off a frame to the multicast TX queue.683*684* This is a software TXQ which will be appended to the CAB queue685* during the beacon setup code.686*687* XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID688* as part of the TX descriptor, bf_state.bfs_tx_queue must be updated689* with the actual hardware txq, or all of this will fall apart.690*691* XXX It may not be a bad idea to just stuff the QCU ID into bf_state692* and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated693* correctly.694*/695static void696ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,697struct ath_buf *bf)698{699ATH_TX_LOCK_ASSERT(sc);700701KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,702("%s: busy status 0x%x", __func__, bf->bf_flags));703704/*705* Ensure that the tx queue is the cabq, so things get706* mapped correctly.707*/708if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {709DPRINTF(sc, ATH_DEBUG_XMIT,710"%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",711__func__, bf, bf->bf_state.bfs_tx_queue,712txq->axq_qnum);713}714715ATH_TXQ_LOCK(txq);716if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {717struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);718struct ieee80211_frame *wh;719720/* mark previous frame */721wh = mtod(bf_last->bf_m, struct ieee80211_frame *);722wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;723bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,724BUS_DMASYNC_PREWRITE);725726/* link descriptor */727ath_hal_settxdesclink(sc->sc_ah,728bf_last->bf_lastds,729bf->bf_daddr);730}731ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);732ATH_TXQ_UNLOCK(txq);733}734735/*736* Hand-off packet to a hardware queue.737*/738static void739ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,740struct ath_buf *bf)741{742struct ath_hal *ah = sc->sc_ah;743struct ath_buf *bf_first;744745/*746* Insert the frame on the outbound list and pass it on747* to the hardware. Multicast frames buffered for power748* save stations and transmit from the CAB queue are stored749* on a s/w only queue and loaded on to the CAB queue in750* the SWBA handler since frames only go out on DTIM and751* to avoid possible races.752*/753ATH_TX_LOCK_ASSERT(sc);754KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,755("%s: busy status 0x%x", __func__, bf->bf_flags));756KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,757("ath_tx_handoff_hw called for mcast queue"));758759/*760* XXX We should instead just verify that sc_txstart_cnt761* or ath_txproc_cnt > 0. That would mean that762* the reset is going to be waiting for us to complete.763*/764if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {765device_printf(sc->sc_dev,766"%s: TX dispatch without holding txcount/txstart refcnt!\n",767__func__);768}769770/*771* XXX .. this is going to cause the hardware to get upset;772* so we really should find some way to drop or queue773* things.774*/775776ATH_TXQ_LOCK(txq);777778/*779* XXX TODO: if there's a holdingbf, then780* ATH_TXQ_PUTRUNNING should be clear.781*782* If there is a holdingbf and the list is empty,783* then axq_link should be pointing to the holdingbf.784*785* Otherwise it should point to the last descriptor786* in the last ath_buf.787*788* In any case, we should really ensure that we789* update the previous descriptor link pointer to790* this descriptor, regardless of all of the above state.791*792* For now this is captured by having axq_link point793* to either the holdingbf (if the TXQ list is empty)794* or the end of the list (if the TXQ list isn't empty.)795* I'd rather just kill axq_link here and do it as above.796*/797798/*799* Append the frame to the TX queue.800*/801ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);802ATH_KTR(sc, ATH_KTR_TX, 3,803"ath_tx_handoff: non-tdma: txq=%u, add bf=%p "804"depth=%d",805txq->axq_qnum,806bf,807txq->axq_depth);808809/*810* If there's a link pointer, update it.811*812* XXX we should replace this with the above logic, just813* to kill axq_link with fire.814*/815if (txq->axq_link != NULL) {816*txq->axq_link = bf->bf_daddr;817DPRINTF(sc, ATH_DEBUG_XMIT,818"%s: link[%u](%p)=%p (%p) depth %d\n", __func__,819txq->axq_qnum, txq->axq_link,820(caddr_t)bf->bf_daddr, bf->bf_desc,821txq->axq_depth);822ATH_KTR(sc, ATH_KTR_TX, 5,823"ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "824"lastds=%d",825txq->axq_qnum, txq->axq_link,826(caddr_t)bf->bf_daddr, bf->bf_desc,827bf->bf_lastds);828}829830/*831* If we've not pushed anything into the hardware yet,832* push the head of the queue into the TxDP.833*834* Once we've started DMA, there's no guarantee that835* updating the TxDP with a new value will actually work.836* So we just don't do that - if we hit the end of the list,837* we keep that buffer around (the "holding buffer") and838* re-start DMA by updating the link pointer of _that_839* descriptor and then restart DMA.840*/841if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {842bf_first = TAILQ_FIRST(&txq->axq_q);843txq->axq_flags |= ATH_TXQ_PUTRUNNING;844ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);845DPRINTF(sc, ATH_DEBUG_XMIT,846"%s: TXDP[%u] = %p (%p) depth %d\n",847__func__, txq->axq_qnum,848(caddr_t)bf_first->bf_daddr, bf_first->bf_desc,849txq->axq_depth);850ATH_KTR(sc, ATH_KTR_TX, 5,851"ath_tx_handoff: TXDP[%u] = %p (%p) "852"lastds=%p depth %d",853txq->axq_qnum,854(caddr_t)bf_first->bf_daddr, bf_first->bf_desc,855bf_first->bf_lastds,856txq->axq_depth);857}858859/*860* Ensure that the bf TXQ matches this TXQ, so later861* checking and holding buffer manipulation is sane.862*/863if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {864DPRINTF(sc, ATH_DEBUG_XMIT,865"%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",866__func__, bf, bf->bf_state.bfs_tx_queue,867txq->axq_qnum);868}869870/*871* Track aggregate queue depth.872*/873if (bf->bf_state.bfs_aggr)874txq->axq_aggr_depth++;875876/*877* Update the link pointer.878*/879ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);880881/*882* Start DMA.883*884* If we wrote a TxDP above, DMA will start from here.885*886* If DMA is running, it'll do nothing.887*888* If the DMA engine hit the end of the QCU list (ie LINK=NULL,889* or VEOL) then it stops at the last transmitted write.890* We then append a new frame by updating the link pointer891* in that descriptor and then kick TxE here; it will re-read892* that last descriptor and find the new descriptor to transmit.893*894* This is why we keep the holding descriptor around.895*/896ath_hal_txstart(ah, txq->axq_qnum);897ATH_TXQ_UNLOCK(txq);898ATH_KTR(sc, ATH_KTR_TX, 1,899"ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);900}901902/*903* Restart TX DMA for the given TXQ.904*905* This must be called whether the queue is empty or not.906*/907static void908ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)909{910struct ath_buf *bf, *bf_last;911912ATH_TXQ_LOCK_ASSERT(txq);913914/* XXX make this ATH_TXQ_FIRST */915bf = TAILQ_FIRST(&txq->axq_q);916bf_last = ATH_TXQ_LAST(txq, axq_q_s);917918if (bf == NULL)919return;920921DPRINTF(sc, ATH_DEBUG_RESET,922"%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",923__func__,924txq->axq_qnum,925bf,926bf_last,927(uint32_t) bf->bf_daddr);928929#ifdef ATH_DEBUG930if (sc->sc_debug & ATH_DEBUG_RESET)931ath_tx_dump(sc, txq);932#endif933934/*935* This is called from a restart, so DMA is known to be936* completely stopped.937*/938KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),939("%s: Q%d: called with PUTRUNNING=1\n",940__func__,941txq->axq_qnum));942943ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);944txq->axq_flags |= ATH_TXQ_PUTRUNNING;945946ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,947&txq->axq_link);948ath_hal_txstart(sc->sc_ah, txq->axq_qnum);949}950951/*952* Hand off a packet to the hardware (or mcast queue.)953*954* The relevant hardware txq should be locked.955*/956static void957ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,958struct ath_buf *bf)959{960ATH_TX_LOCK_ASSERT(sc);961962#ifdef ATH_DEBUG_ALQ963if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))964ath_tx_alq_post(sc, bf);965#endif966967if (txq->axq_qnum == ATH_TXQ_SWQ)968ath_tx_handoff_mcast(sc, txq, bf);969else970ath_tx_handoff_hw(sc, txq, bf);971}972973/*974* Setup a frame for encryption.975*976* If this fails, then an non-zero error is returned. The mbuf977* must be freed by the caller.978*/979static int980ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,981struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,982int *keyix)983{984DPRINTF(sc, ATH_DEBUG_XMIT,985"%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",986__func__,987*hdrlen,988*pktlen,989isfrag,990iswep,991m0);992993if (iswep) {994const struct ieee80211_cipher *cip;995struct ieee80211_key *k;996997/*998* Construct the 802.11 header+trailer for an encrypted999* frame. The only reason this can fail is because of an1000* unknown or unsupported cipher/key type.1001*/1002k = ieee80211_crypto_encap(ni, m0);1003if (k == NULL) {1004/*1005* This can happen when the key is yanked after the1006* frame was queued. Just discard the frame; the1007* 802.11 layer counts failures and provides1008* debugging/diagnostics.1009*/1010return (0);1011}1012/*1013* Adjust the packet + header lengths for the crypto1014* additions and calculate the h/w key index. When1015* a s/w mic is done the frame will have had any mic1016* added to it prior to entry so m0->m_pkthdr.len will1017* account for it. Otherwise we need to add it to the1018* packet length.1019*/1020cip = k->wk_cipher;1021(*hdrlen) += cip->ic_header;1022(*pktlen) += cip->ic_header + cip->ic_trailer;1023/* NB: frags always have any TKIP MIC done in s/w */1024if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)1025(*pktlen) += cip->ic_miclen;1026(*keyix) = k->wk_keyix;1027} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {1028/*1029* Use station key cache slot, if assigned.1030*/1031(*keyix) = ni->ni_ucastkey.wk_keyix;1032if ((*keyix) == IEEE80211_KEYIX_NONE)1033(*keyix) = HAL_TXKEYIX_INVALID;1034} else1035(*keyix) = HAL_TXKEYIX_INVALID;10361037return (1);1038}10391040/*1041* Calculate whether interoperability protection is required for1042* this frame.1043*1044* This requires the rate control information be filled in,1045* as the protection requirement depends upon the current1046* operating mode / PHY.1047*/1048static void1049ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)1050{1051struct ieee80211_frame *wh;1052uint8_t rix;1053uint16_t flags;1054int shortPreamble;1055const HAL_RATE_TABLE *rt = sc->sc_currates;1056struct ieee80211com *ic = &sc->sc_ic;10571058flags = bf->bf_state.bfs_txflags;1059rix = bf->bf_state.bfs_rc[0].rix;1060shortPreamble = bf->bf_state.bfs_shpream;1061wh = mtod(bf->bf_m, struct ieee80211_frame *);10621063/* Disable frame protection for TOA probe frames */1064if (bf->bf_flags & ATH_BUF_TOA_PROBE) {1065/* XXX count */1066flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA);1067bf->bf_state.bfs_doprot = 0;1068goto finish;1069}10701071/*1072* If 802.11g protection is enabled, determine whether1073* to use RTS/CTS or just CTS. Note that this is only1074* done for OFDM unicast frames.1075*/1076if ((ic->ic_flags & IEEE80211_F_USEPROT) &&1077rt->info[rix].phy == IEEE80211_T_OFDM &&1078(flags & HAL_TXDESC_NOACK) == 0) {1079bf->bf_state.bfs_doprot = 1;1080/* XXX fragments must use CCK rates w/ protection */1081if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {1082flags |= HAL_TXDESC_RTSENA;1083} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {1084flags |= HAL_TXDESC_CTSENA;1085}1086/*1087* For frags it would be desirable to use the1088* highest CCK rate for RTS/CTS. But stations1089* farther away may detect it at a lower CCK rate1090* so use the configured protection rate instead1091* (for now).1092*/1093sc->sc_stats.ast_tx_protect++;1094}10951096/*1097* If 11n protection is enabled and it's a HT frame,1098* enable RTS.1099*1100* XXX ic_htprotmode or ic_curhtprotmode?1101* XXX should it_htprotmode only matter if ic_curhtprotmode1102* XXX indicates it's not a HT pure environment?1103*/1104if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&1105rt->info[rix].phy == IEEE80211_T_HT &&1106(flags & HAL_TXDESC_NOACK) == 0) {1107flags |= HAL_TXDESC_RTSENA;1108sc->sc_stats.ast_tx_htprotect++;1109}11101111finish:1112bf->bf_state.bfs_txflags = flags;1113}11141115/*1116* Update the frame duration given the currently selected rate.1117*1118* This also updates the frame duration value, so it will require1119* a DMA flush.1120*/1121static void1122ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)1123{1124struct ieee80211_frame *wh;1125uint8_t rix;1126uint16_t flags;1127int shortPreamble;1128struct ath_hal *ah = sc->sc_ah;1129const HAL_RATE_TABLE *rt = sc->sc_currates;1130int isfrag = bf->bf_m->m_flags & M_FRAG;11311132flags = bf->bf_state.bfs_txflags;1133rix = bf->bf_state.bfs_rc[0].rix;1134shortPreamble = bf->bf_state.bfs_shpream;1135wh = mtod(bf->bf_m, struct ieee80211_frame *);11361137/*1138* Calculate duration. This logically belongs in the 802.111139* layer but it lacks sufficient information to calculate it.1140*/1141if ((flags & HAL_TXDESC_NOACK) == 0 && !IEEE80211_IS_CTL(wh)) {1142u_int16_t dur;1143if (shortPreamble)1144dur = rt->info[rix].spAckDuration;1145else1146dur = rt->info[rix].lpAckDuration;1147if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {1148dur += dur; /* additional SIFS+ACK */1149/*1150* Include the size of next fragment so NAV is1151* updated properly. The last fragment uses only1152* the ACK duration1153*1154* XXX TODO: ensure that the rate lookup for each1155* fragment is the same as the rate used by the1156* first fragment!1157*/1158dur += ath_hal_computetxtime(ah,1159rt,1160bf->bf_nextfraglen,1161rix, shortPreamble,1162AH_TRUE);1163}1164if (isfrag) {1165/*1166* Force hardware to use computed duration for next1167* fragment by disabling multi-rate retry which updates1168* duration based on the multi-rate duration table.1169*/1170bf->bf_state.bfs_ismrr = 0;1171bf->bf_state.bfs_try0 = ATH_TXMGTTRY;1172/* XXX update bfs_rc[0].try? */1173}11741175/* Update the duration field itself */1176*(u_int16_t *)wh->i_dur = htole16(dur);1177}1178}11791180static uint8_t1181ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,1182int cix, int shortPreamble)1183{1184uint8_t ctsrate;11851186/*1187* CTS transmit rate is derived from the transmit rate1188* by looking in the h/w rate table. We must also factor1189* in whether or not a short preamble is to be used.1190*/1191/* NB: cix is set above where RTS/CTS is enabled */1192KASSERT(cix != 0xff, ("cix not setup"));1193ctsrate = rt->info[cix].rateCode;11941195/* XXX this should only matter for legacy rates */1196if (shortPreamble)1197ctsrate |= rt->info[cix].shortPreamble;11981199return (ctsrate);1200}12011202/*1203* Calculate the RTS/CTS duration for legacy frames.1204*/1205static int1206ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,1207int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,1208int flags)1209{1210int ctsduration = 0;12111212/* This mustn't be called for HT modes */1213if (rt->info[cix].phy == IEEE80211_T_HT) {1214printf("%s: HT rate where it shouldn't be (0x%x)\n",1215__func__, rt->info[cix].rateCode);1216return (-1);1217}12181219/*1220* Compute the transmit duration based on the frame1221* size and the size of an ACK frame. We call into the1222* HAL to do the computation since it depends on the1223* characteristics of the actual PHY being used.1224*1225* NB: CTS is assumed the same size as an ACK so we can1226* use the precalculated ACK durations.1227*/1228if (shortPreamble) {1229if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */1230ctsduration += rt->info[cix].spAckDuration;1231ctsduration += ath_hal_computetxtime(ah,1232rt, pktlen, rix, AH_TRUE, AH_TRUE);1233if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */1234ctsduration += rt->info[rix].spAckDuration;1235} else {1236if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */1237ctsduration += rt->info[cix].lpAckDuration;1238ctsduration += ath_hal_computetxtime(ah,1239rt, pktlen, rix, AH_FALSE, AH_TRUE);1240if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */1241ctsduration += rt->info[rix].lpAckDuration;1242}12431244return (ctsduration);1245}12461247/*1248* Update the given ath_buf with updated rts/cts setup and duration1249* values.1250*1251* To support rate lookups for each software retry, the rts/cts rate1252* and cts duration must be re-calculated.1253*1254* This function assumes the RTS/CTS flags have been set as needed;1255* mrr has been disabled; and the rate control lookup has been done.1256*1257* XXX TODO: MRR need only be disabled for the pre-11n NICs.1258* XXX The 11n NICs support per-rate RTS/CTS configuration.1259*/1260static void1261ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)1262{1263uint16_t ctsduration = 0;1264uint8_t ctsrate = 0;1265uint8_t rix = bf->bf_state.bfs_rc[0].rix;1266uint8_t cix = 0;1267const HAL_RATE_TABLE *rt = sc->sc_currates;12681269/*1270* No RTS/CTS enabled? Don't bother.1271*/1272if ((bf->bf_state.bfs_txflags &1273(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {1274/* XXX is this really needed? */1275bf->bf_state.bfs_ctsrate = 0;1276bf->bf_state.bfs_ctsduration = 0;1277return;1278}12791280/*1281* If protection is enabled, use the protection rix control1282* rate. Otherwise use the rate0 control rate.1283*/1284if (bf->bf_state.bfs_doprot)1285rix = sc->sc_protrix;1286else1287rix = bf->bf_state.bfs_rc[0].rix;12881289/*1290* If the raw path has hard-coded ctsrate0 to something,1291* use it.1292*/1293if (bf->bf_state.bfs_ctsrate0 != 0)1294cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);1295else1296/* Control rate from above */1297cix = rt->info[rix].controlRate;12981299/* Calculate the rtscts rate for the given cix */1300ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,1301bf->bf_state.bfs_shpream);13021303/* The 11n chipsets do ctsduration calculations for you */1304if (! ath_tx_is_11n(sc))1305ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,1306bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,1307rt, bf->bf_state.bfs_txflags);13081309/* Squirrel away in ath_buf */1310bf->bf_state.bfs_ctsrate = ctsrate;1311bf->bf_state.bfs_ctsduration = ctsduration;13121313/*1314* Must disable multi-rate retry when using RTS/CTS.1315*/1316if (!sc->sc_mrrprot) {1317bf->bf_state.bfs_ismrr = 0;1318bf->bf_state.bfs_try0 =1319bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */1320}1321}13221323/*1324* Setup the descriptor chain for a normal or fast-frame1325* frame.1326*1327* XXX TODO: extend to include the destination hardware QCU ID.1328* Make sure that is correct. Make sure that when being added1329* to the mcastq, the CABQ QCUID is set or things will get a bit1330* odd.1331*/1332static void1333ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)1334{1335struct ath_desc *ds = bf->bf_desc;1336struct ath_hal *ah = sc->sc_ah;13371338if (bf->bf_state.bfs_txrate0 == 0)1339DPRINTF(sc, ATH_DEBUG_XMIT,1340"%s: bf=%p, txrate0=%d\n", __func__, bf, 0);13411342ath_hal_setuptxdesc(ah, ds1343, bf->bf_state.bfs_pktlen /* packet length */1344, bf->bf_state.bfs_hdrlen /* header length */1345, bf->bf_state.bfs_atype /* Atheros packet type */1346, bf->bf_state.bfs_txpower /* txpower */1347, bf->bf_state.bfs_txrate01348, bf->bf_state.bfs_try0 /* series 0 rate/tries */1349, bf->bf_state.bfs_keyix /* key cache index */1350, bf->bf_state.bfs_txantenna /* antenna mode */1351, bf->bf_state.bfs_txflags /* flags */1352, bf->bf_state.bfs_ctsrate /* rts/cts rate */1353, bf->bf_state.bfs_ctsduration /* rts/cts duration */1354);13551356/*1357* This will be overridden when the descriptor chain is written.1358*/1359bf->bf_lastds = ds;1360bf->bf_last = bf;13611362/* Set rate control and descriptor chain for this frame */1363ath_tx_set_ratectrl(sc, bf->bf_node, bf);1364ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);1365}13661367/*1368* Do a rate lookup.1369*1370* This performs a rate lookup for the given ath_buf only if it's required.1371* Non-data frames and raw frames don't require it.1372*1373* This populates the primary and MRR entries; MRR values are1374* then disabled later on if something requires it (eg RTS/CTS on1375* pre-11n chipsets.1376*1377* This needs to be done before the RTS/CTS fields are calculated1378* as they may depend upon the rate chosen.1379*/1380static void1381ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,1382int pktlen, int is_aggr)1383{1384uint8_t rate, rix;1385int try0;1386int maxdur; // Note: Unused for now1387int maxpktlen;13881389if (! bf->bf_state.bfs_doratelookup)1390return;13911392/* Get rid of any previous state */1393bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));13941395ATH_NODE_LOCK(ATH_NODE(bf->bf_node));1396ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,1397pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen);13981399/* In case MRR is disabled, make sure rc[0] is setup correctly */1400bf->bf_state.bfs_rc[0].rix = rix;1401bf->bf_state.bfs_rc[0].ratecode = rate;1402bf->bf_state.bfs_rc[0].tries = try0;14031404if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)1405ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,1406is_aggr, bf->bf_state.bfs_rc);1407ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));14081409sc->sc_txrix = rix; /* for LED blinking */1410sc->sc_lastdatarix = rix; /* for fast frames */1411bf->bf_state.bfs_try0 = try0;1412bf->bf_state.bfs_txrate0 = rate;1413bf->bf_state.bfs_rc_maxpktlen = maxpktlen;1414}14151416/*1417* Update the CLRDMASK bit in the ath_buf if it needs to be set.1418*/1419static void1420ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,1421struct ath_buf *bf)1422{1423struct ath_node *an = ATH_NODE(bf->bf_node);14241425ATH_TX_LOCK_ASSERT(sc);14261427if (an->clrdmask == 1) {1428bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;1429an->clrdmask = 0;1430}1431}14321433/*1434* Return whether this frame should be software queued or1435* direct dispatched.1436*1437* When doing powersave, BAR frames should be queued but other management1438* frames should be directly sent.1439*1440* When not doing powersave, stick BAR frames into the hardware queue1441* so it goes out even though the queue is paused.1442*1443* For now, management frames are also software queued by default.1444*/1445static int1446ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,1447struct mbuf *m0, int *queue_to_head)1448{1449struct ieee80211_node *ni = &an->an_node;1450struct ieee80211_frame *wh;1451uint8_t type, subtype;14521453wh = mtod(m0, struct ieee80211_frame *);1454type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;1455subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;14561457(*queue_to_head) = 0;14581459/* If it's not in powersave - direct-dispatch BAR */1460if ((ATH_NODE(ni)->an_is_powersave == 0)1461&& type == IEEE80211_FC0_TYPE_CTL &&1462subtype == IEEE80211_FC0_SUBTYPE_BAR) {1463DPRINTF(sc, ATH_DEBUG_SW_TX,1464"%s: BAR: TX'ing direct\n", __func__);1465return (0);1466} else if ((ATH_NODE(ni)->an_is_powersave == 1)1467&& type == IEEE80211_FC0_TYPE_CTL &&1468subtype == IEEE80211_FC0_SUBTYPE_BAR) {1469/* BAR TX whilst asleep; queue */1470DPRINTF(sc, ATH_DEBUG_SW_TX,1471"%s: swq: TX'ing\n", __func__);1472(*queue_to_head) = 1;1473return (1);1474} else if ((ATH_NODE(ni)->an_is_powersave == 1)1475&& (type == IEEE80211_FC0_TYPE_MGT ||1476type == IEEE80211_FC0_TYPE_CTL)) {1477/*1478* Other control/mgmt frame; bypass software queuing1479* for now!1480*/1481DPRINTF(sc, ATH_DEBUG_XMIT,1482"%s: %6D: Node is asleep; sending mgmt "1483"(type=%d, subtype=%d)\n",1484__func__, ni->ni_macaddr, ":", type, subtype);1485return (0);1486} else {1487return (1);1488}1489}14901491/*1492* Transmit the given frame to the hardware.1493*1494* The frame must already be setup; rate control must already have1495* been done.1496*1497* XXX since the TXQ lock is being held here (and I dislike holding1498* it for this long when not doing software aggregation), later on1499* break this function into "setup_normal" and "xmit_normal". The1500* lock only needs to be held for the ath_tx_handoff call.1501*1502* XXX we don't update the leak count here - if we're doing1503* direct frame dispatch, we need to be able to do it without1504* decrementing the leak count (eg multicast queue frames.)1505*/1506static void1507ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,1508struct ath_buf *bf)1509{1510struct ath_node *an = ATH_NODE(bf->bf_node);1511struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];15121513ATH_TX_LOCK_ASSERT(sc);15141515/*1516* For now, just enable CLRDMASK. ath_tx_xmit_normal() does1517* set a completion handler however it doesn't (yet) properly1518* handle the strict ordering requirements needed for normal,1519* non-aggregate session frames.1520*1521* Once this is implemented, only set CLRDMASK like this for1522* frames that must go out - eg management/raw frames.1523*/1524bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;15251526/* Setup the descriptor before handoff */1527ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);1528ath_tx_calc_duration(sc, bf);1529ath_tx_calc_protection(sc, bf);1530ath_tx_set_rtscts(sc, bf);1531ath_tx_rate_fill_rcflags(sc, bf);1532ath_tx_setds(sc, bf);15331534/* Track per-TID hardware queue depth correctly */1535tid->hwq_depth++;15361537/* Assign the completion handler */1538bf->bf_comp = ath_tx_normal_comp;15391540/* Hand off to hardware */1541ath_tx_handoff(sc, txq, bf);1542}15431544/*1545* Do the basic frame setup stuff that's required before the frame1546* is added to a software queue.1547*1548* All frames get mostly the same treatment and it's done once.1549* Retransmits fiddle with things like the rate control setup,1550* setting the retransmit bit in the packet; doing relevant DMA/bus1551* syncing and relinking it (back) into the hardware TX queue.1552*1553* Note that this may cause the mbuf to be reallocated, so1554* m0 may not be valid.1555*1556* If there's a problem then the mbuf is freed and an error1557* is returned. The ath_buf then needs to be freed by the1558* caller.1559*/1560static int1561ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,1562struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)1563{1564struct ieee80211vap *vap = ni->ni_vap;1565struct ieee80211com *ic = &sc->sc_ic;1566int error, iswep, ismcast, isfrag, ismrr;1567int keyix, hdrlen, pktlen, try0 = 0;1568u_int8_t rix = 0, txrate = 0;1569struct ath_desc *ds;1570struct ieee80211_frame *wh;1571u_int subtype, flags;1572HAL_PKT_TYPE atype;1573const HAL_RATE_TABLE *rt;1574HAL_BOOL shortPreamble;1575struct ath_node *an;15761577/* XXX TODO: this pri is only used for non-QoS check, right? */1578u_int pri;15791580/*1581* To ensure that both sequence numbers and the CCMP PN handling1582* is "correct", make sure that the relevant TID queue is locked.1583* Otherwise the CCMP PN and seqno may appear out of order, causing1584* re-ordered frames to have out of order CCMP PN's, resulting1585* in many, many frame drops.1586*/1587ATH_TX_LOCK_ASSERT(sc);15881589wh = mtod(m0, struct ieee80211_frame *);1590iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;1591ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);1592isfrag = m0->m_flags & M_FRAG;1593hdrlen = ieee80211_anyhdrsize(wh);1594/*1595* Packet length must not include any1596* pad bytes; deduct them here.1597*/1598pktlen = m0->m_pkthdr.len - (hdrlen & 3);15991600/* seqno allocate, only if AMPDU isn't running */1601if ((m0->m_flags & M_AMPDU_MPDU) == 0)1602ieee80211_output_seqno_assign(ni, -1, m0);16031604/* Handle encryption twiddling if needed */1605if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,1606&pktlen, &keyix)) {1607ieee80211_free_mbuf(m0);1608return EIO;1609}16101611/* packet header may have moved, reset our local pointer */1612wh = mtod(m0, struct ieee80211_frame *);16131614pktlen += IEEE80211_CRC_LEN;16151616/*1617* Load the DMA map so any coalescing is done. This1618* also calculates the number of descriptors we need.1619*/1620error = ath_tx_dmasetup(sc, bf, m0);1621if (error != 0)1622return error;1623KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));1624bf->bf_node = ni; /* NB: held reference */1625m0 = bf->bf_m; /* NB: may have changed */1626wh = mtod(m0, struct ieee80211_frame *);16271628/* setup descriptors */1629ds = bf->bf_desc;1630rt = sc->sc_currates;1631KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));16321633/*1634* NB: the 802.11 layer marks whether or not we should1635* use short preamble based on the current mode and1636* negotiated parameters.1637*/1638if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&1639(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {1640shortPreamble = AH_TRUE;1641sc->sc_stats.ast_tx_shortpre++;1642} else {1643shortPreamble = AH_FALSE;1644}16451646an = ATH_NODE(ni);1647//flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */1648flags = 0;1649ismrr = 0; /* default no multi-rate retry*/16501651pri = ath_tx_getac(sc, m0); /* honor classification */1652/* XXX use txparams instead of fixed values */1653/*1654* Calculate Atheros packet type from IEEE80211 packet header,1655* setup for rate calculations, and select h/w transmit queue.1656*/1657switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {1658case IEEE80211_FC0_TYPE_MGT:1659subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;1660if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)1661atype = HAL_PKT_TYPE_BEACON;1662else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)1663atype = HAL_PKT_TYPE_PROBE_RESP;1664else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)1665atype = HAL_PKT_TYPE_ATIM;1666else1667atype = HAL_PKT_TYPE_NORMAL; /* XXX */1668rix = an->an_mgmtrix;1669txrate = rt->info[rix].rateCode;1670if (shortPreamble)1671txrate |= rt->info[rix].shortPreamble;1672try0 = ATH_TXMGTTRY;1673flags |= HAL_TXDESC_INTREQ; /* force interrupt */1674break;1675case IEEE80211_FC0_TYPE_CTL:1676atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */1677rix = an->an_mgmtrix;1678txrate = rt->info[rix].rateCode;1679if (shortPreamble)1680txrate |= rt->info[rix].shortPreamble;1681try0 = ATH_TXMGTTRY;1682flags |= HAL_TXDESC_INTREQ; /* force interrupt */1683break;1684case IEEE80211_FC0_TYPE_DATA:1685atype = HAL_PKT_TYPE_NORMAL; /* default */1686/*1687* Data frames: multicast frames go out at a fixed rate,1688* EAPOL frames use the mgmt frame rate; otherwise consult1689* the rate control module for the rate to use.1690*/1691if (ismcast) {1692rix = an->an_mcastrix;1693txrate = rt->info[rix].rateCode;1694if (shortPreamble)1695txrate |= rt->info[rix].shortPreamble;1696try0 = 1;1697} else if (m0->m_flags & M_EAPOL) {1698/* XXX? maybe always use long preamble? */1699rix = an->an_mgmtrix;1700txrate = rt->info[rix].rateCode;1701if (shortPreamble)1702txrate |= rt->info[rix].shortPreamble;1703try0 = ATH_TXMAXTRY; /* XXX?too many? */1704} else {1705/*1706* Do rate lookup on each TX, rather than using1707* the hard-coded TX information decided here.1708*/1709ismrr = 1;1710bf->bf_state.bfs_doratelookup = 1;1711}17121713/*1714* Check whether to set NOACK for this WME category or not.1715*/1716if (ieee80211_wme_vap_ac_is_noack(vap, pri))1717flags |= HAL_TXDESC_NOACK;1718break;1719default:1720device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",1721wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);1722/* XXX statistic */1723/* XXX free tx dmamap */1724ieee80211_free_mbuf(m0);1725return EIO;1726}17271728/*1729* There are two known scenarios where the frame AC doesn't match1730* what the destination TXQ is.1731*1732* + non-QoS frames (eg management?) that the net80211 stack has1733* assigned a higher AC to, but since it's a non-QoS TID, it's1734* being thrown into TID 16. TID 16 gets the AC_BE queue.1735* It's quite possible that management frames should just be1736* direct dispatched to hardware rather than go via the software1737* queue; that should be investigated in the future. There are1738* some specific scenarios where this doesn't make sense, mostly1739* surrounding ADDBA request/response - hence why that is special1740* cased.1741*1742* + Multicast frames going into the VAP mcast queue. That shows up1743* as "TXQ 11".1744*1745* This driver should eventually support separate TID and TXQ locking,1746* allowing for arbitrary AC frames to appear on arbitrary software1747* queues, being queued to the "correct" hardware queue when needed.1748*/1749#if 01750if (txq != sc->sc_ac2q[pri]) {1751DPRINTF(sc, ATH_DEBUG_XMIT,1752"%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",1753__func__,1754txq,1755txq->axq_qnum,1756pri,1757sc->sc_ac2q[pri],1758sc->sc_ac2q[pri]->axq_qnum);1759}1760#endif17611762/*1763* Calculate miscellaneous flags.1764*/1765if (ismcast) {1766flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */1767} else if (pktlen > vap->iv_rtsthreshold &&1768(ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {1769flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */1770sc->sc_stats.ast_tx_rts++;1771}1772if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */1773sc->sc_stats.ast_tx_noack++;1774#ifdef IEEE80211_SUPPORT_TDMA1775if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {1776DPRINTF(sc, ATH_DEBUG_TDMA,1777"%s: discard frame, ACK required w/ TDMA\n", __func__);1778sc->sc_stats.ast_tdma_ack++;1779/* XXX free tx dmamap */1780ieee80211_free_mbuf(m0);1781return EIO;1782}1783#endif17841785/*1786* If it's a frame to do location reporting on,1787* communicate it to the HAL.1788*/1789if (ieee80211_get_toa_params(m0, NULL)) {1790device_printf(sc->sc_dev,1791"%s: setting TX positioning bit\n", __func__);1792flags |= HAL_TXDESC_POS;17931794/*1795* Note: The hardware reports timestamps for1796* each of the RX'ed packets as part of the packet1797* exchange. So this means things like RTS/CTS1798* exchanges, as well as the final ACK.1799*1800* So, if you send a RTS-protected NULL data frame,1801* you'll get an RX report for the RTS response, then1802* an RX report for the NULL frame, and then the TX1803* completion at the end.1804*1805* NOTE: it doesn't work right for CCK frames;1806* there's no channel info data provided unless1807* it's OFDM or HT. Will have to dig into it.1808*/1809flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);1810bf->bf_flags |= ATH_BUF_TOA_PROBE;1811}18121813#if 01814/*1815* Placeholder: if you want to transmit with the azimuth1816* timestamp in the end of the payload, here's where you1817* should set the TXDESC field.1818*/1819flags |= HAL_TXDESC_HWTS;1820#endif18211822/*1823* Determine if a tx interrupt should be generated for1824* this descriptor. We take a tx interrupt to reap1825* descriptors when the h/w hits an EOL condition or1826* when the descriptor is specifically marked to generate1827* an interrupt. We periodically mark descriptors in this1828* way to insure timely replenishing of the supply needed1829* for sending frames. Defering interrupts reduces system1830* load and potentially allows more concurrent work to be1831* done but if done to aggressively can cause senders to1832* backup.1833*1834* NB: use >= to deal with sc_txintrperiod changing1835* dynamically through sysctl.1836*/1837if (flags & HAL_TXDESC_INTREQ) {1838txq->axq_intrcnt = 0;1839} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {1840flags |= HAL_TXDESC_INTREQ;1841txq->axq_intrcnt = 0;1842}18431844/* This point forward is actual TX bits */18451846/*1847* At this point we are committed to sending the frame1848* and we don't need to look at m_nextpkt; clear it in1849* case this frame is part of frag chain.1850*/1851m0->m_nextpkt = NULL;18521853if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))1854ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,1855sc->sc_hwmap[rix].ieeerate, -1);18561857if (ieee80211_radiotap_active_vap(vap)) {1858sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;1859if (iswep)1860sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;1861if (isfrag)1862sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;1863sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;1864sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);1865sc->sc_tx_th.wt_antenna = sc->sc_txantenna;18661867ieee80211_radiotap_tx(vap, m0);1868}18691870/* Blank the legacy rate array */1871bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));18721873/*1874* ath_buf_set_rate needs at least one rate/try to setup1875* the rate scenario.1876*/1877bf->bf_state.bfs_rc[0].rix = rix;1878bf->bf_state.bfs_rc[0].tries = try0;1879bf->bf_state.bfs_rc[0].ratecode = txrate;18801881/* Store the decided rate index values away */1882bf->bf_state.bfs_pktlen = pktlen;1883bf->bf_state.bfs_hdrlen = hdrlen;1884bf->bf_state.bfs_atype = atype;1885bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);1886bf->bf_state.bfs_txrate0 = txrate;1887bf->bf_state.bfs_try0 = try0;1888bf->bf_state.bfs_keyix = keyix;1889bf->bf_state.bfs_txantenna = sc->sc_txantenna;1890bf->bf_state.bfs_txflags = flags;1891bf->bf_state.bfs_shpream = shortPreamble;18921893/* XXX this should be done in ath_tx_setrate() */1894bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */1895bf->bf_state.bfs_ctsrate = 0; /* calculated later */1896bf->bf_state.bfs_ctsduration = 0;1897bf->bf_state.bfs_ismrr = ismrr;18981899return 0;1900}19011902/*1903* Queue a frame to the hardware or software queue.1904*1905* This can be called by the net80211 code.1906*1907* XXX what about locking? Or, push the seqno assign into the1908* XXX aggregate scheduler so its serialised?1909*1910* XXX When sending management frames via ath_raw_xmit(),1911* should CLRDMASK be set unconditionally?1912*/1913int1914ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,1915struct ath_buf *bf, struct mbuf *m0)1916{1917struct ieee80211vap *vap = ni->ni_vap;1918struct ath_vap *avp = ATH_VAP(vap);1919int r = 0;1920u_int pri;1921int tid;1922struct ath_txq *txq;1923int ismcast;1924const struct ieee80211_frame *wh;1925int is_ampdu, is_ampdu_tx, is_ampdu_pending;1926ieee80211_seq seqno;1927uint8_t type, subtype;1928int queue_to_head;19291930ATH_TX_LOCK_ASSERT(sc);19311932/*1933* Determine the target hardware queue.1934*1935* For multicast frames, the txq gets overridden appropriately1936* depending upon the state of PS. If powersave is enabled1937* then they get added to the cabq for later transmit.1938*1939* The "fun" issue here is that group addressed frames should1940* have the sequence number from a different pool, rather than1941* the per-TID pool. That means that even QoS group addressed1942* frames will have a sequence number from that global value,1943* which means if we transmit different group addressed frames1944* at different traffic priorities, the sequence numbers will1945* all be out of whack. So - chances are, the right thing1946* to do here is to always put group addressed frames into the BE1947* queue, and ignore the TID for queue selection.1948*1949* For any other frame, we do a TID/QoS lookup inside the frame1950* to see what the TID should be. If it's a non-QoS frame, the1951* AC and TID are overridden. The TID/TXQ code assumes the1952* TID is on a predictable hardware TXQ, so we don't support1953* having a node TID queued to multiple hardware TXQs.1954* This may change in the future but would require some locking1955* fudgery.1956*/1957pri = ath_tx_getac(sc, m0);1958tid = ath_tx_gettid(sc, m0);19591960txq = sc->sc_ac2q[pri];1961wh = mtod(m0, struct ieee80211_frame *);1962ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);1963type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;1964subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;19651966/*1967* Enforce how deep the multicast queue can grow.1968*1969* XXX duplicated in ath_raw_xmit().1970*/1971if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {1972if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth1973> sc->sc_txq_mcastq_maxdepth) {1974sc->sc_stats.ast_tx_mcastq_overflow++;1975m_freem(m0);1976return (ENOBUFS);1977}1978}19791980/*1981* Enforce how deep the unicast queue can grow.1982*1983* If the node is in power save then we don't want1984* the software queue to grow too deep, or a node may1985* end up consuming all of the ath_buf entries.1986*1987* For now, only do this for DATA frames.1988*1989* We will want to cap how many management/control1990* frames get punted to the software queue so it doesn't1991* fill up. But the correct solution isn't yet obvious.1992* In any case, this check should at least let frames pass1993* that we are direct-dispatching.1994*1995* XXX TODO: duplicate this to the raw xmit path!1996*/1997if (type == IEEE80211_FC0_TYPE_DATA &&1998ATH_NODE(ni)->an_is_powersave &&1999ATH_NODE(ni)->an_swq_depth >2000sc->sc_txq_node_psq_maxdepth) {2001sc->sc_stats.ast_tx_node_psq_overflow++;2002m_freem(m0);2003return (ENOBUFS);2004}20052006/* A-MPDU TX */2007is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);2008is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);2009is_ampdu = is_ampdu_tx | is_ampdu_pending;20102011DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",2012__func__, tid, pri, is_ampdu);20132014/* Set local packet state, used to queue packets to hardware */2015bf->bf_state.bfs_tid = tid;2016bf->bf_state.bfs_tx_queue = txq->axq_qnum;2017bf->bf_state.bfs_pri = pri;20182019#if 12020/*2021* When servicing one or more stations in power-save mode2022* (or) if there is some mcast data waiting on the mcast2023* queue (to prevent out of order delivery) multicast frames2024* must be bufferd until after the beacon.2025*2026* TODO: we should lock the mcastq before we check the length.2027*/2028if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {2029txq = &avp->av_mcastq;2030/*2031* Mark the frame as eventually belonging on the CAB2032* queue, so the descriptor setup functions will2033* correctly initialise the descriptor 'qcuId' field.2034*/2035bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;2036}2037#endif20382039/* Do the generic frame setup */2040/* XXX should just bzero the bf_state? */2041bf->bf_state.bfs_dobaw = 0;20422043/* A-MPDU TX? Manually set sequence number */2044/*2045* Don't do it whilst pending; the net80211 layer still2046* assigns them.2047*2048* Don't assign A-MPDU sequence numbers to group address2049* frames; they come from a different sequence number space.2050*/2051if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {2052/*2053* Always call; this function will2054* handle making sure that null data frames2055* and group-addressed frames don't get a sequence number2056* from the current TID and thus mess with the BAW.2057*/2058seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);20592060/*2061* Don't add QoS NULL frames and group-addressed frames2062* to the BAW.2063*/2064if (IEEE80211_QOS_HAS_SEQ(wh) &&2065(! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&2066(! IEEE80211_IS_QOS_NULL(wh))) {2067bf->bf_state.bfs_dobaw = 1;2068}2069}20702071/*2072* If needed, the sequence number has been assigned.2073* Squirrel it away somewhere easy to get to.2074*/2075bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;20762077/* Is ampdu pending? fetch the seqno and print it out */2078if (is_ampdu_pending)2079DPRINTF(sc, ATH_DEBUG_SW_TX,2080"%s: tid %d: ampdu pending, seqno %d\n",2081__func__, tid, M_SEQNO_GET(m0));20822083/* This also sets up the DMA map; crypto; frame parameters, etc */2084r = ath_tx_normal_setup(sc, ni, bf, m0, txq);2085if (r != 0)2086return (r);20872088/* At this point m0 could have changed! */2089m0 = bf->bf_m;20902091#if 12092/*2093* If it's a multicast frame, do a direct-dispatch to the2094* destination hardware queue. Don't bother software2095* queuing it.2096*/2097/*2098* If it's a BAR frame, do a direct dispatch to the2099* destination hardware queue. Don't bother software2100* queuing it, as the TID will now be paused.2101* Sending a BAR frame can occur from the net80211 txa timer2102* (ie, retries) or from the ath txtask (completion call.)2103* It queues directly to hardware because the TID is paused2104* at this point (and won't be unpaused until the BAR has2105* either been TXed successfully or max retries has been2106* reached.)2107*/2108/*2109* Until things are better debugged - if this node is asleep2110* and we're sending it a non-BAR frame, direct dispatch it.2111* Why? Because we need to figure out what's actually being2112* sent - eg, during reassociation/reauthentication after2113* the node (last) disappeared whilst asleep, the driver should2114* have unpaused/unsleep'ed the node. So until that is2115* sorted out, use this workaround.2116*/2117if (txq == &avp->av_mcastq) {2118DPRINTF(sc, ATH_DEBUG_SW_TX,2119"%s: bf=%p: mcastq: TX'ing\n", __func__, bf);2120bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;2121ath_tx_xmit_normal(sc, txq, bf);2122} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,2123&queue_to_head)) {2124ath_tx_swq(sc, ni, txq, queue_to_head, bf);2125} else {2126bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;2127ath_tx_xmit_normal(sc, txq, bf);2128}2129#else2130/*2131* For now, since there's no software queue,2132* direct-dispatch to the hardware.2133*/2134bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;2135/*2136* Update the current leak count if2137* we're leaking frames; and set the2138* MORE flag as appropriate.2139*/2140ath_tx_leak_count_update(sc, tid, bf);2141ath_tx_xmit_normal(sc, txq, bf);2142#endif2143return 0;2144}21452146static int2147ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,2148struct ath_buf *bf, struct mbuf *m0,2149const struct ieee80211_bpf_params *params)2150{2151struct ieee80211com *ic = &sc->sc_ic;2152struct ieee80211vap *vap = ni->ni_vap;2153int error, ismcast, ismrr;2154int keyix, hdrlen, pktlen, try0, txantenna;2155u_int8_t rix, txrate;2156struct ieee80211_frame *wh;2157u_int flags;2158HAL_PKT_TYPE atype;2159const HAL_RATE_TABLE *rt;2160struct ath_desc *ds;2161u_int pri;2162int o_tid = -1;2163int do_override;2164uint8_t type, subtype;2165int queue_to_head;2166struct ath_node *an = ATH_NODE(ni);21672168ATH_TX_LOCK_ASSERT(sc);21692170wh = mtod(m0, struct ieee80211_frame *);2171ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);2172hdrlen = ieee80211_anyhdrsize(wh);2173/*2174* Packet length must not include any2175* pad bytes; deduct them here.2176*/2177/* XXX honor IEEE80211_BPF_DATAPAD */2178pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;21792180type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;2181subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;21822183ATH_KTR(sc, ATH_KTR_TX, 2,2184"ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);21852186DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",2187__func__, ismcast);21882189pri = params->ibp_pri & 3;2190/* Override pri if the frame isn't a QoS one */2191if (! IEEE80211_QOS_HAS_SEQ(wh))2192pri = ath_tx_getac(sc, m0);21932194/* XXX If it's an ADDBA, override the correct queue */2195do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);21962197/* Map ADDBA to the correct priority */2198if (do_override) {2199#if 12200DPRINTF(sc, ATH_DEBUG_XMIT,2201"%s: overriding tid %d pri %d -> %d\n",2202__func__, o_tid, pri, TID_TO_WME_AC(o_tid));2203#endif2204pri = TID_TO_WME_AC(o_tid);2205}22062207/*2208* "pri" is the hardware queue to transmit on.2209*2210* Look at the description in ath_tx_start() to understand2211* what needs to be "fixed" here so we just use the TID2212* for QoS frames.2213*/22142215/* seqno allocate, only if AMPDU isn't running */2216if ((m0->m_flags & M_AMPDU_MPDU) == 0)2217ieee80211_output_seqno_assign(ni, -1, m0);22182219/* Handle encryption twiddling if needed */2220if (! ath_tx_tag_crypto(sc, ni,2221m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,2222&hdrlen, &pktlen, &keyix)) {2223ieee80211_free_mbuf(m0);2224return EIO;2225}2226/* packet header may have moved, reset our local pointer */2227wh = mtod(m0, struct ieee80211_frame *);22282229/* Do the generic frame setup */2230/* XXX should just bzero the bf_state? */2231bf->bf_state.bfs_dobaw = 0;22322233error = ath_tx_dmasetup(sc, bf, m0);2234if (error != 0)2235return error;2236m0 = bf->bf_m; /* NB: may have changed */2237wh = mtod(m0, struct ieee80211_frame *);2238KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));2239bf->bf_node = ni; /* NB: held reference */22402241/* Always enable CLRDMASK for raw frames for now.. */2242flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */2243flags |= HAL_TXDESC_INTREQ; /* force interrupt */2244if (params->ibp_flags & IEEE80211_BPF_RTS)2245flags |= HAL_TXDESC_RTSENA;2246else if (params->ibp_flags & IEEE80211_BPF_CTS) {2247/* XXX assume 11g/11n protection? */2248bf->bf_state.bfs_doprot = 1;2249flags |= HAL_TXDESC_CTSENA;2250}2251/* XXX leave ismcast to injector? */2252if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)2253flags |= HAL_TXDESC_NOACK;22542255rt = sc->sc_currates;2256KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));22572258/* Fetch first rate information */2259rix = ath_tx_findrix(sc, params->ibp_rate0);2260try0 = params->ibp_try0;22612262/*2263* Override EAPOL rate as appropriate.2264*/2265if (m0->m_flags & M_EAPOL) {2266/* XXX? maybe always use long preamble? */2267rix = an->an_mgmtrix;2268try0 = ATH_TXMAXTRY; /* XXX?too many? */2269}22702271/*2272* If it's a frame to do location reporting on,2273* communicate it to the HAL.2274*/2275if (ieee80211_get_toa_params(m0, NULL)) {2276device_printf(sc->sc_dev,2277"%s: setting TX positioning bit\n", __func__);2278flags |= HAL_TXDESC_POS;2279flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);2280bf->bf_flags |= ATH_BUF_TOA_PROBE;2281}22822283txrate = rt->info[rix].rateCode;2284if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)2285txrate |= rt->info[rix].shortPreamble;2286sc->sc_txrix = rix;2287ismrr = (params->ibp_try1 != 0);2288txantenna = params->ibp_pri >> 2;2289if (txantenna == 0) /* XXX? */2290txantenna = sc->sc_txantenna;22912292/*2293* Since ctsrate is fixed, store it away for later2294* use when the descriptor fields are being set.2295*/2296if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))2297bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;22982299/*2300* NB: we mark all packets as type PSPOLL so the h/w won't2301* set the sequence number, duration, etc.2302*/2303atype = HAL_PKT_TYPE_PSPOLL;23042305if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))2306ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,2307sc->sc_hwmap[rix].ieeerate, -1);23082309if (ieee80211_radiotap_active_vap(vap)) {2310sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;2311if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)2312sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;2313if (m0->m_flags & M_FRAG)2314sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;2315sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;2316sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,2317ieee80211_get_node_txpower(ni));2318sc->sc_tx_th.wt_antenna = sc->sc_txantenna;23192320ieee80211_radiotap_tx(vap, m0);2321}23222323/*2324* Formulate first tx descriptor with tx controls.2325*/2326ds = bf->bf_desc;2327/* XXX check return value? */23282329/* Store the decided rate index values away */2330bf->bf_state.bfs_pktlen = pktlen;2331bf->bf_state.bfs_hdrlen = hdrlen;2332bf->bf_state.bfs_atype = atype;2333bf->bf_state.bfs_txpower = MIN(params->ibp_power,2334ieee80211_get_node_txpower(ni));2335bf->bf_state.bfs_txrate0 = txrate;2336bf->bf_state.bfs_try0 = try0;2337bf->bf_state.bfs_keyix = keyix;2338bf->bf_state.bfs_txantenna = txantenna;2339bf->bf_state.bfs_txflags = flags;2340bf->bf_state.bfs_shpream =2341!! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);23422343/* Set local packet state, used to queue packets to hardware */2344bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);2345bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;2346bf->bf_state.bfs_pri = pri;23472348/* XXX this should be done in ath_tx_setrate() */2349bf->bf_state.bfs_ctsrate = 0;2350bf->bf_state.bfs_ctsduration = 0;2351bf->bf_state.bfs_ismrr = ismrr;23522353/* Blank the legacy rate array */2354bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));23552356bf->bf_state.bfs_rc[0].rix = rix;2357bf->bf_state.bfs_rc[0].tries = try0;2358bf->bf_state.bfs_rc[0].ratecode = txrate;23592360if (ismrr) {2361int rix;23622363rix = ath_tx_findrix(sc, params->ibp_rate1);2364bf->bf_state.bfs_rc[1].rix = rix;2365bf->bf_state.bfs_rc[1].tries = params->ibp_try1;23662367rix = ath_tx_findrix(sc, params->ibp_rate2);2368bf->bf_state.bfs_rc[2].rix = rix;2369bf->bf_state.bfs_rc[2].tries = params->ibp_try2;23702371rix = ath_tx_findrix(sc, params->ibp_rate3);2372bf->bf_state.bfs_rc[3].rix = rix;2373bf->bf_state.bfs_rc[3].tries = params->ibp_try3;2374}2375/*2376* All the required rate control decisions have been made;2377* fill in the rc flags.2378*/2379ath_tx_rate_fill_rcflags(sc, bf);23802381/* NB: no buffered multicast in power save support */23822383/*2384* If we're overiding the ADDBA destination, dump directly2385* into the hardware queue, right after any pending2386* frames to that node are.2387*/2388DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",2389__func__, do_override);23902391#if 12392/*2393* Put addba frames in the right place in the right TID/HWQ.2394*/2395if (do_override) {2396bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;2397/*2398* XXX if it's addba frames, should we be leaking2399* them out via the frame leak method?2400* XXX for now let's not risk it; but we may wish2401* to investigate this later.2402*/2403ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);2404} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,2405&queue_to_head)) {2406/* Queue to software queue */2407ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);2408} else {2409bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;2410ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);2411}2412#else2413/* Direct-dispatch to the hardware */2414bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;2415/*2416* Update the current leak count if2417* we're leaking frames; and set the2418* MORE flag as appropriate.2419*/2420ath_tx_leak_count_update(sc, tid, bf);2421ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);2422#endif2423return 0;2424}24252426/*2427* Send a raw frame.2428*2429* This can be called by net80211.2430*/2431int2432ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,2433const struct ieee80211_bpf_params *params)2434{2435struct ieee80211com *ic = ni->ni_ic;2436struct ath_softc *sc = ic->ic_softc;2437struct ath_buf *bf;2438struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);2439int error = 0;24402441ATH_PCU_LOCK(sc);2442if (sc->sc_inreset_cnt > 0) {2443DPRINTF(sc, ATH_DEBUG_XMIT,2444"%s: sc_inreset_cnt > 0; bailing\n", __func__);2445error = EIO;2446ATH_PCU_UNLOCK(sc);2447goto badbad;2448}2449sc->sc_txstart_cnt++;2450ATH_PCU_UNLOCK(sc);24512452/* Wake the hardware up already */2453ATH_LOCK(sc);2454ath_power_set_power_state(sc, HAL_PM_AWAKE);2455ATH_UNLOCK(sc);24562457ATH_TX_LOCK(sc);24582459if (!sc->sc_running || sc->sc_invalid) {2460DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",2461__func__, sc->sc_running, sc->sc_invalid);2462m_freem(m);2463error = ENETDOWN;2464goto bad;2465}24662467/*2468* Enforce how deep the multicast queue can grow.2469*2470* XXX duplicated in ath_tx_start().2471*/2472if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {2473if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth2474> sc->sc_txq_mcastq_maxdepth) {2475sc->sc_stats.ast_tx_mcastq_overflow++;2476error = ENOBUFS;2477}24782479if (error != 0) {2480m_freem(m);2481goto bad;2482}2483}24842485/*2486* Grab a TX buffer and associated resources.2487*/2488bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);2489if (bf == NULL) {2490sc->sc_stats.ast_tx_nobuf++;2491m_freem(m);2492error = ENOBUFS;2493goto bad;2494}2495ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",2496m, params, bf);24972498if (params == NULL) {2499/*2500* Legacy path; interpret frame contents to decide2501* precisely how to send the frame.2502*/2503if (ath_tx_start(sc, ni, bf, m)) {2504error = EIO; /* XXX */2505goto bad2;2506}2507} else {2508/*2509* Caller supplied explicit parameters to use in2510* sending the frame.2511*/2512if (ath_tx_raw_start(sc, ni, bf, m, params)) {2513error = EIO; /* XXX */2514goto bad2;2515}2516}2517sc->sc_wd_timer = 5;2518sc->sc_stats.ast_tx_raw++;25192520/*2521* Update the TIM - if there's anything queued to the2522* software queue and power save is enabled, we should2523* set the TIM.2524*/2525ath_tx_update_tim(sc, ni, 1);25262527ATH_TX_UNLOCK(sc);25282529ATH_PCU_LOCK(sc);2530sc->sc_txstart_cnt--;2531ATH_PCU_UNLOCK(sc);25322533/* Put the hardware back to sleep if required */2534ATH_LOCK(sc);2535ath_power_restore_power_state(sc);2536ATH_UNLOCK(sc);25372538return 0;25392540bad2:2541ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "2542"bf=%p",2543m,2544params,2545bf);2546ATH_TXBUF_LOCK(sc);2547ath_returnbuf_head(sc, bf);2548ATH_TXBUF_UNLOCK(sc);25492550bad:2551ATH_TX_UNLOCK(sc);25522553ATH_PCU_LOCK(sc);2554sc->sc_txstart_cnt--;2555ATH_PCU_UNLOCK(sc);25562557/* Put the hardware back to sleep if required */2558ATH_LOCK(sc);2559ath_power_restore_power_state(sc);2560ATH_UNLOCK(sc);25612562badbad:2563ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",2564m, params);2565sc->sc_stats.ast_tx_raw_fail++;25662567return error;2568}25692570/* Some helper functions */25712572/*2573* ADDBA (and potentially others) need to be placed in the same2574* hardware queue as the TID/node it's relating to. This is so2575* it goes out after any pending non-aggregate frames to the2576* same node/TID.2577*2578* If this isn't done, the ADDBA can go out before the frames2579* queued in hardware. Even though these frames have a sequence2580* number -earlier- than the ADDBA can be transmitted (but2581* no frames whose sequence numbers are after the ADDBA should2582* be!) they'll arrive after the ADDBA - and the receiving end2583* will simply drop them as being out of the BAW.2584*2585* The frames can't be appended to the TID software queue - it'll2586* never be sent out. So these frames have to be directly2587* dispatched to the hardware, rather than queued in software.2588* So if this function returns true, the TXQ has to be2589* overridden and it has to be directly dispatched.2590*2591* It's a dirty hack, but someone's gotta do it.2592*/25932594/*2595* Return an alternate TID for ADDBA request frames.2596*2597* Yes, this likely should be done in the net80211 layer.2598*/2599static int2600ath_tx_action_frame_override_queue(struct ath_softc *sc,2601struct ieee80211_node *ni,2602struct mbuf *m0, int *tid)2603{2604struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);2605struct ieee80211_action_ba_addbarequest *ia;2606uint8_t *frm;2607uint16_t baparamset;26082609/* Not action frame? Bail */2610if (! IEEE80211_IS_MGMT_ACTION(wh))2611return 0;26122613/* XXX Not needed for frames we send? */2614#if 02615/* Correct length? */2616if (! ieee80211_parse_action(ni, m))2617return 0;2618#endif26192620/* Extract out action frame */2621frm = (u_int8_t *)&wh[1];2622ia = (struct ieee80211_action_ba_addbarequest *) frm;26232624/* Not ADDBA? Bail */2625if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)2626return 0;2627if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)2628return 0;26292630/* Extract TID, return it */2631baparamset = le16toh(ia->rq_baparamset);2632*tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID);26332634return 1;2635}26362637/* Per-node software queue operations */26382639/*2640* Add the current packet to the given BAW.2641* It is assumed that the current packet2642*2643* + fits inside the BAW;2644* + already has had a sequence number allocated.2645*2646* Since the BAW status may be modified by both the ath task and2647* the net80211/ifnet contexts, the TID must be locked.2648*/2649void2650ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,2651struct ath_tid *tid, struct ath_buf *bf)2652{2653int index, cindex;2654struct ieee80211_tx_ampdu *tap;26552656ATH_TX_LOCK_ASSERT(sc);26572658if (bf->bf_state.bfs_isretried)2659return;26602661tap = ath_tx_get_tx_tid(an, tid->tid);26622663if (! bf->bf_state.bfs_dobaw) {2664DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2665"%s: dobaw=0, seqno=%d, window %d:%d\n",2666__func__, SEQNO(bf->bf_state.bfs_seqno),2667tap->txa_start, tap->txa_wnd);2668}26692670if (bf->bf_state.bfs_addedbaw)2671DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2672"%s: re-added? tid=%d, seqno %d; window %d:%d; "2673"baw head=%d tail=%d\n",2674__func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),2675tap->txa_start, tap->txa_wnd, tid->baw_head,2676tid->baw_tail);26772678/*2679* Verify that the given sequence number is not outside of the2680* BAW. Complain loudly if that's the case.2681*/2682if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,2683SEQNO(bf->bf_state.bfs_seqno))) {2684DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2685"%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "2686"baw head=%d tail=%d\n",2687__func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),2688tap->txa_start, tap->txa_wnd, tid->baw_head,2689tid->baw_tail);2690}26912692/*2693* ni->ni_txseqs[] is the currently allocated seqno.2694* the txa state contains the current baw start.2695*/2696index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));2697cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);2698DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2699"%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "2700"baw head=%d tail=%d\n",2701__func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),2702tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,2703tid->baw_tail);27042705#if 02706assert(tid->tx_buf[cindex] == NULL);2707#endif2708if (tid->tx_buf[cindex] != NULL) {2709DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2710"%s: ba packet dup (index=%d, cindex=%d, "2711"head=%d, tail=%d)\n",2712__func__, index, cindex, tid->baw_head, tid->baw_tail);2713DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2714"%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",2715__func__,2716tid->tx_buf[cindex],2717SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),2718bf,2719SEQNO(bf->bf_state.bfs_seqno)2720);2721}2722tid->tx_buf[cindex] = bf;27232724if (index >= ((tid->baw_tail - tid->baw_head) &2725(ATH_TID_MAX_BUFS - 1))) {2726tid->baw_tail = cindex;2727INCR(tid->baw_tail, ATH_TID_MAX_BUFS);2728}2729}27302731/*2732* Flip the BAW buffer entry over from the existing one to the new one.2733*2734* When software retransmitting a (sub-)frame, it is entirely possible that2735* the frame ath_buf is marked as BUSY and can't be immediately reused.2736* In that instance the buffer is cloned and the new buffer is used for2737* retransmit. We thus need to update the ath_buf slot in the BAW buf2738* tracking array to maintain consistency.2739*/2740static void2741ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,2742struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)2743{2744int index, cindex;2745struct ieee80211_tx_ampdu *tap;2746int seqno = SEQNO(old_bf->bf_state.bfs_seqno);27472748ATH_TX_LOCK_ASSERT(sc);27492750tap = ath_tx_get_tx_tid(an, tid->tid);2751index = ATH_BA_INDEX(tap->txa_start, seqno);2752cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);27532754/*2755* Just warn for now; if it happens then we should find out2756* about it. It's highly likely the aggregation session will2757* soon hang.2758*/2759if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {2760DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2761"%s: retransmitted buffer"2762" has mismatching seqno's, BA session may hang.\n",2763__func__);2764DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2765"%s: old seqno=%d, new_seqno=%d\n", __func__,2766old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);2767}27682769if (tid->tx_buf[cindex] != old_bf) {2770DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2771"%s: ath_buf pointer incorrect; "2772" has m BA session may hang.\n", __func__);2773DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2774"%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);2775}27762777tid->tx_buf[cindex] = new_bf;2778}27792780/*2781* seq_start - left edge of BAW2782* seq_next - current/next sequence number to allocate2783*2784* Since the BAW status may be modified by both the ath task and2785* the net80211/ifnet contexts, the TID must be locked.2786*/2787static void2788ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,2789struct ath_tid *tid, const struct ath_buf *bf)2790{2791int index, cindex;2792struct ieee80211_tx_ampdu *tap;2793int seqno = SEQNO(bf->bf_state.bfs_seqno);27942795ATH_TX_LOCK_ASSERT(sc);27962797tap = ath_tx_get_tx_tid(an, tid->tid);2798index = ATH_BA_INDEX(tap->txa_start, seqno);2799cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);28002801DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2802"%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "2803"baw head=%d, tail=%d\n",2804__func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,2805cindex, tid->baw_head, tid->baw_tail);28062807/*2808* If this occurs then we have a big problem - something else2809* has slid tap->txa_start along without updating the BAW2810* tracking start/end pointers. Thus the TX BAW state is now2811* completely busted.2812*2813* But for now, since I haven't yet fixed TDMA and buffer cloning,2814* it's quite possible that a cloned buffer is making its way2815* here and causing it to fire off. Disable TDMA for now.2816*/2817if (tid->tx_buf[cindex] != bf) {2818DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2819"%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",2820__func__, bf, SEQNO(bf->bf_state.bfs_seqno),2821tid->tx_buf[cindex],2822(tid->tx_buf[cindex] != NULL) ?2823SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);2824}28252826tid->tx_buf[cindex] = NULL;28272828while (tid->baw_head != tid->baw_tail &&2829!tid->tx_buf[tid->baw_head]) {2830INCR(tap->txa_start, IEEE80211_SEQ_RANGE);2831INCR(tid->baw_head, ATH_TID_MAX_BUFS);2832}2833DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,2834"%s: tid=%d: baw is now %d:%d, baw head=%d\n",2835__func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);2836}28372838static void2839ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,2840struct ath_buf *bf)2841{2842struct ieee80211_frame *wh;28432844ATH_TX_LOCK_ASSERT(sc);28452846if (tid->an->an_leak_count > 0) {2847wh = mtod(bf->bf_m, struct ieee80211_frame *);28482849/*2850* Update MORE based on the software/net80211 queue states.2851*/2852if ((tid->an->an_stack_psq > 0)2853|| (tid->an->an_swq_depth > 0))2854wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;2855else2856wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;28572858DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,2859"%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",2860__func__,2861tid->an->an_node.ni_macaddr,2862":",2863tid->an->an_leak_count,2864tid->an->an_stack_psq,2865tid->an->an_swq_depth,2866!! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));28672868/*2869* Re-sync the underlying buffer.2870*/2871bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,2872BUS_DMASYNC_PREWRITE);28732874tid->an->an_leak_count --;2875}2876}28772878static int2879ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)2880{28812882ATH_TX_LOCK_ASSERT(sc);28832884if (tid->an->an_leak_count > 0) {2885return (1);2886}2887if (tid->paused)2888return (0);2889return (1);2890}28912892/*2893* Mark the current node/TID as ready to TX.2894*2895* This is done to make it easy for the software scheduler to2896* find which nodes have data to send.2897*2898* The TXQ lock must be held.2899*/2900void2901ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)2902{2903struct ath_txq *txq = sc->sc_ac2q[tid->ac];29042905ATH_TX_LOCK_ASSERT(sc);29062907/*2908* If we are leaking out a frame to this destination2909* for PS-POLL, ensure that we allow scheduling to2910* occur.2911*/2912if (! ath_tx_tid_can_tx_or_sched(sc, tid))2913return; /* paused, can't schedule yet */29142915if (tid->sched)2916return; /* already scheduled */29172918tid->sched = 1;29192920#if 02921/*2922* If this is a sleeping node we're leaking to, given2923* it a higher priority. This is so bad for QoS it hurts.2924*/2925if (tid->an->an_leak_count) {2926TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);2927} else {2928TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);2929}2930#endif29312932/*2933* We can't do the above - it'll confuse the TXQ software2934* scheduler which will keep checking the _head_ TID2935* in the list to see if it has traffic. If we queue2936* a TID to the head of the list and it doesn't transmit,2937* we'll check it again.2938*2939* So, get the rest of this leaking frames support working2940* and reliable first and _then_ optimise it so they're2941* pushed out in front of any other pending software2942* queued nodes.2943*/2944TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);2945}29462947/*2948* Mark the current node as no longer needing to be polled for2949* TX packets.2950*2951* The TXQ lock must be held.2952*/2953static void2954ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)2955{2956struct ath_txq *txq = sc->sc_ac2q[tid->ac];29572958ATH_TX_LOCK_ASSERT(sc);29592960if (tid->sched == 0)2961return;29622963tid->sched = 0;2964TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);2965}29662967/*2968* Assign a sequence number manually to the given frame.2969*2970* This should only be called for A-MPDU TX frames.2971*2972* Note: for group addressed frames, the sequence number2973* should be from NONQOS_TID, and net80211 should have2974* already assigned it for us.2975*/2976static ieee80211_seq2977ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,2978struct ath_buf *bf, struct mbuf *m0)2979{2980struct ieee80211_frame *wh;2981int tid;2982ieee80211_seq seqno;2983uint8_t subtype;29842985wh = mtod(m0, struct ieee80211_frame *);2986tid = ieee80211_gettid(wh);29872988DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n",2989__func__, tid, IEEE80211_QOS_HAS_SEQ(wh));29902991/* XXX Is it a control frame? Ignore */29922993/* Does the packet require a sequence number? */2994if (! IEEE80211_QOS_HAS_SEQ(wh))2995return -1;29962997ATH_TX_LOCK_ASSERT(sc);29982999/* TODO: can this use ieee80211_output_seqno_assign() now? */30003001/*3002* Is it a QOS NULL Data frame? Give it a sequence number from3003* the default TID (IEEE80211_NONQOS_TID.)3004*3005* The RX path of everything I've looked at doesn't include the NULL3006* data frame sequence number in the aggregation state updates, so3007* assigning it a sequence number there will cause a BAW hole on the3008* RX side.3009*/3010subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;3011if (IEEE80211_IS_QOS_NULL(wh)) {3012/* XXX no locking for this TID? This is a bit of a problem. */3013seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];3014INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);3015} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {3016/*3017* group addressed frames get a sequence number from3018* a different sequence number space.3019*/3020seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];3021INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);3022} else {3023/* Manually assign sequence number */3024seqno = ni->ni_txseqs[tid];3025INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);3026}3027*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);3028M_SEQNO_SET(m0, seqno);30293030/* Return so caller can do something with it if needed */3031DPRINTF(sc, ATH_DEBUG_SW_TX,3032"%s: -> subtype=0x%x, tid=%d, seqno=%d\n",3033__func__, subtype, tid, seqno);3034return seqno;3035}30363037/*3038* Attempt to direct dispatch an aggregate frame to hardware.3039* If the frame is out of BAW, queue.3040* Otherwise, schedule it as a single frame.3041*/3042static void3043ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,3044struct ath_txq *txq, struct ath_buf *bf)3045{3046struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];3047struct ieee80211_tx_ampdu *tap;30483049ATH_TX_LOCK_ASSERT(sc);30503051tap = ath_tx_get_tx_tid(an, tid->tid);30523053/* paused? queue */3054if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {3055ATH_TID_INSERT_HEAD(tid, bf, bf_list);3056/* XXX don't sched - we're paused! */3057return;3058}30593060/* outside baw? queue */3061if (bf->bf_state.bfs_dobaw &&3062(! BAW_WITHIN(tap->txa_start, tap->txa_wnd,3063SEQNO(bf->bf_state.bfs_seqno)))) {3064ATH_TID_INSERT_HEAD(tid, bf, bf_list);3065ath_tx_tid_sched(sc, tid);3066return;3067}30683069/*3070* This is a temporary check and should be removed once3071* all the relevant code paths have been fixed.3072*3073* During aggregate retries, it's possible that the head3074* frame will fail (which has the bfs_aggr and bfs_nframes3075* fields set for said aggregate) and will be retried as3076* a single frame. In this instance, the values should3077* be reset or the completion code will get upset with you.3078*/3079if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {3080DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,3081"%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,3082bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);3083bf->bf_state.bfs_aggr = 0;3084bf->bf_state.bfs_nframes = 1;3085}30863087/* Update CLRDMASK just before this frame is queued */3088ath_tx_update_clrdmask(sc, tid, bf);30893090/* Direct dispatch to hardware */3091ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,3092false);3093ath_tx_calc_duration(sc, bf);3094ath_tx_calc_protection(sc, bf);3095ath_tx_set_rtscts(sc, bf);3096ath_tx_rate_fill_rcflags(sc, bf);3097ath_tx_setds(sc, bf);30983099/* Statistics */3100sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;31013102/* Track per-TID hardware queue depth correctly */3103tid->hwq_depth++;31043105/* Add to BAW */3106if (bf->bf_state.bfs_dobaw) {3107ath_tx_addto_baw(sc, an, tid, bf);3108bf->bf_state.bfs_addedbaw = 1;3109}31103111/* Set completion handler, multi-frame aggregate or not */3112bf->bf_comp = ath_tx_aggr_comp;31133114/*3115* Update the current leak count if3116* we're leaking frames; and set the3117* MORE flag as appropriate.3118*/3119ath_tx_leak_count_update(sc, tid, bf);31203121/* Hand off to hardware */3122ath_tx_handoff(sc, txq, bf);3123}31243125/*3126* Attempt to send the packet.3127* If the queue isn't busy, direct-dispatch.3128* If the queue is busy enough, queue the given packet on the3129* relevant software queue.3130*/3131void3132ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,3133struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)3134{3135struct ath_node *an = ATH_NODE(ni);3136struct ieee80211_frame *wh;3137struct ath_tid *atid;3138int pri, tid;3139struct mbuf *m0 = bf->bf_m;31403141ATH_TX_LOCK_ASSERT(sc);31423143/* Fetch the TID - non-QoS frames get assigned to TID 16 */3144wh = mtod(m0, struct ieee80211_frame *);3145pri = ath_tx_getac(sc, m0);3146tid = ath_tx_gettid(sc, m0);3147atid = &an->an_tid[tid];31483149DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",3150__func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));31513152/* Set local packet state, used to queue packets to hardware */3153/* XXX potentially duplicate info, re-check */3154bf->bf_state.bfs_tid = tid;3155bf->bf_state.bfs_tx_queue = txq->axq_qnum;3156bf->bf_state.bfs_pri = pri;31573158/*3159* If the hardware queue isn't busy, queue it directly.3160* If the hardware queue is busy, queue it.3161* If the TID is paused or the traffic it outside BAW, software3162* queue it.3163*3164* If the node is in power-save and we're leaking a frame,3165* leak a single frame.3166*/3167if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {3168/* TID is paused, queue */3169DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);3170/*3171* If the caller requested that it be sent at a high3172* priority, queue it at the head of the list.3173*/3174if (queue_to_head)3175ATH_TID_INSERT_HEAD(atid, bf, bf_list);3176else3177ATH_TID_INSERT_TAIL(atid, bf, bf_list);3178} else if (ath_tx_ampdu_pending(sc, an, tid)) {3179/* AMPDU pending; queue */3180DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);3181ATH_TID_INSERT_TAIL(atid, bf, bf_list);3182/* XXX sched? */3183} else if (ath_tx_ampdu_running(sc, an, tid)) {3184/*3185* AMPDU running, queue single-frame if the hardware queue3186* isn't busy.3187*3188* If the hardware queue is busy, sending an aggregate frame3189* then just hold off so we can queue more aggregate frames.3190*3191* Otherwise we may end up with single frames leaking through3192* because we are dispatching them too quickly.3193*3194* TODO: maybe we should treat this as two policies - minimise3195* latency, or maximise throughput. Then for BE/BK we can3196* maximise throughput, and VO/VI (if AMPDU is enabled!)3197* minimise latency.3198*/31993200/*3201* Always queue the frame to the tail of the list.3202*/3203ATH_TID_INSERT_TAIL(atid, bf, bf_list);32043205/*3206* If the hardware queue isn't busy, direct dispatch3207* the head frame in the list.3208*3209* Note: if we're say, configured to do ADDBA but not A-MPDU3210* then maybe we want to still queue two non-aggregate frames3211* to the hardware. Again with the per-TID policy3212* configuration..)3213*3214* Otherwise, schedule the TID.3215*/3216/* XXX TXQ locking */3217if (txq->axq_depth + txq->fifo.axq_depth == 0) {3218bf = ATH_TID_FIRST(atid);3219ATH_TID_REMOVE(atid, bf, bf_list);32203221/*3222* Ensure it's definitely treated as a non-AMPDU3223* frame - this information may have been left3224* over from a previous attempt.3225*/3226bf->bf_state.bfs_aggr = 0;3227bf->bf_state.bfs_nframes = 1;32283229/* Queue to the hardware */3230ath_tx_xmit_aggr(sc, an, txq, bf);3231DPRINTF(sc, ATH_DEBUG_SW_TX,3232"%s: xmit_aggr\n",3233__func__);3234} else {3235DPRINTF(sc, ATH_DEBUG_SW_TX,3236"%s: ampdu; swq'ing\n",3237__func__);32383239ath_tx_tid_sched(sc, atid);3240}3241/*3242* If we're not doing A-MPDU, be prepared to direct dispatch3243* up to both limits if possible. This particular corner3244* case may end up with packet starvation between aggregate3245* traffic and non-aggregate traffic: we want to ensure3246* that non-aggregate stations get a few frames queued to the3247* hardware before the aggregate station(s) get their chance.3248*3249* So if you only ever see a couple of frames direct dispatched3250* to the hardware from a non-AMPDU client, check both here3251* and in the software queue dispatcher to ensure that those3252* non-AMPDU stations get a fair chance to transmit.3253*/3254/* XXX TXQ locking */3255} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&3256(txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {3257/* AMPDU not running, attempt direct dispatch */3258DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);3259/* See if clrdmask needs to be set */3260ath_tx_update_clrdmask(sc, atid, bf);32613262/*3263* Update the current leak count if3264* we're leaking frames; and set the3265* MORE flag as appropriate.3266*/3267ath_tx_leak_count_update(sc, atid, bf);32683269/*3270* Dispatch the frame.3271*/3272ath_tx_xmit_normal(sc, txq, bf);3273} else {3274/* Busy; queue */3275DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);3276ATH_TID_INSERT_TAIL(atid, bf, bf_list);3277ath_tx_tid_sched(sc, atid);3278}3279}32803281/*3282* Only set the clrdmask bit if none of the nodes are currently3283* filtered.3284*3285* XXX TODO: go through all the callers and check to see3286* which are being called in the context of looping over all3287* TIDs (eg, if all tids are being paused, resumed, etc.)3288* That'll avoid O(n^2) complexity here.3289*/3290static void3291ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)3292{3293int i;32943295ATH_TX_LOCK_ASSERT(sc);32963297for (i = 0; i < IEEE80211_TID_SIZE; i++) {3298if (an->an_tid[i].isfiltered == 1)3299return;3300}3301an->clrdmask = 1;3302}33033304/*3305* Configure the per-TID node state.3306*3307* This likely belongs in if_ath_node.c but I can't think of anywhere3308* else to put it just yet.3309*3310* This sets up the SLISTs and the mutex as appropriate.3311*/3312void3313ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)3314{3315int i, j;3316struct ath_tid *atid;33173318for (i = 0; i < IEEE80211_TID_SIZE; i++) {3319atid = &an->an_tid[i];33203321/* XXX now with this bzer(), is the field 0'ing needed? */3322bzero(atid, sizeof(*atid));33233324TAILQ_INIT(&atid->tid_q);3325TAILQ_INIT(&atid->filtq.tid_q);3326atid->tid = i;3327atid->an = an;3328for (j = 0; j < ATH_TID_MAX_BUFS; j++)3329atid->tx_buf[j] = NULL;3330atid->baw_head = atid->baw_tail = 0;3331atid->paused = 0;3332atid->sched = 0;3333atid->hwq_depth = 0;3334atid->cleanup_inprogress = 0;3335if (i == IEEE80211_NONQOS_TID)3336atid->ac = ATH_NONQOS_TID_AC;3337else3338atid->ac = TID_TO_WME_AC(i);3339}3340an->clrdmask = 1; /* Always start by setting this bit */3341}33423343/*3344* Pause the current TID. This stops packets from being transmitted3345* on it.3346*3347* Since this is also called from upper layers as well as the driver,3348* it will get the TID lock.3349*/3350static void3351ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)3352{33533354ATH_TX_LOCK_ASSERT(sc);3355tid->paused++;3356DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",3357__func__,3358tid->an->an_node.ni_macaddr, ":",3359tid->tid,3360tid->paused);3361}33623363/*3364* Unpause the current TID, and schedule it if needed.3365*/3366static void3367ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)3368{3369ATH_TX_LOCK_ASSERT(sc);33703371/*3372* There's some odd places where ath_tx_tid_resume() is called3373* when it shouldn't be; this works around that particular issue3374* until it's actually resolved.3375*/3376if (tid->paused == 0) {3377device_printf(sc->sc_dev,3378"%s: [%6D]: tid=%d, paused=0?\n",3379__func__,3380tid->an->an_node.ni_macaddr, ":",3381tid->tid);3382} else {3383tid->paused--;3384}33853386DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,3387"%s: [%6D]: tid=%d, unpaused = %d\n",3388__func__,3389tid->an->an_node.ni_macaddr, ":",3390tid->tid,3391tid->paused);33923393if (tid->paused)3394return;33953396/*3397* Override the clrdmask configuration for the next frame3398* from this TID, just to get the ball rolling.3399*/3400ath_tx_set_clrdmask(sc, tid->an);34013402if (tid->axq_depth == 0)3403return;34043405/* XXX isfiltered shouldn't ever be 0 at this point */3406if (tid->isfiltered == 1) {3407DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",3408__func__);3409return;3410}34113412ath_tx_tid_sched(sc, tid);34133414/*3415* Queue the software TX scheduler.3416*/3417ath_tx_swq_kick(sc);3418}34193420/*3421* Add the given ath_buf to the TID filtered frame list.3422* This requires the TID be filtered.3423*/3424static void3425ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,3426struct ath_buf *bf)3427{34283429ATH_TX_LOCK_ASSERT(sc);34303431if (!tid->isfiltered)3432DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",3433__func__);34343435DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);34363437/* Set the retry bit and bump the retry counter */3438ath_tx_set_retry(sc, bf);3439sc->sc_stats.ast_tx_swfiltered++;34403441ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);3442}34433444/*3445* Handle a completed filtered frame from the given TID.3446* This just enables/pauses the filtered frame state if required3447* and appends the filtered frame to the filtered queue.3448*/3449static void3450ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,3451struct ath_buf *bf)3452{34533454ATH_TX_LOCK_ASSERT(sc);34553456if (! tid->isfiltered) {3457DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",3458__func__, tid->tid);3459tid->isfiltered = 1;3460ath_tx_tid_pause(sc, tid);3461}34623463/* Add the frame to the filter queue */3464ath_tx_tid_filt_addbuf(sc, tid, bf);3465}34663467/*3468* Complete the filtered frame TX completion.3469*3470* If there are no more frames in the hardware queue, unpause/unfilter3471* the TID if applicable. Otherwise we will wait for a node PS transition3472* to unfilter.3473*/3474static void3475ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)3476{3477struct ath_buf *bf;3478int do_resume = 0;34793480ATH_TX_LOCK_ASSERT(sc);34813482if (tid->hwq_depth != 0)3483return;34843485DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",3486__func__, tid->tid);3487if (tid->isfiltered == 1) {3488tid->isfiltered = 0;3489do_resume = 1;3490}34913492/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */3493ath_tx_set_clrdmask(sc, tid->an);34943495/* XXX this is really quite inefficient */3496while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {3497ATH_TID_FILT_REMOVE(tid, bf, bf_list);3498ATH_TID_INSERT_HEAD(tid, bf, bf_list);3499}35003501/* And only resume if we had paused before */3502if (do_resume)3503ath_tx_tid_resume(sc, tid);3504}35053506/*3507* Called when a single (aggregate or otherwise) frame is completed.3508*3509* Returns 0 if the buffer could be added to the filtered list3510* (cloned or otherwise), 1 if the buffer couldn't be added to the3511* filtered list (failed clone; expired retry) and the caller should3512* free it and handle it like a failure (eg by sending a BAR.)3513*3514* since the buffer may be cloned, bf must be not touched after this3515* if the return value is 0.3516*/3517static int3518ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,3519struct ath_buf *bf)3520{3521struct ath_buf *nbf;3522int retval;35233524ATH_TX_LOCK_ASSERT(sc);35253526/*3527* Don't allow a filtered frame to live forever.3528*/3529if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {3530sc->sc_stats.ast_tx_swretrymax++;3531DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,3532"%s: bf=%p, seqno=%d, exceeded retries\n",3533__func__,3534bf,3535SEQNO(bf->bf_state.bfs_seqno));3536retval = 1; /* error */3537goto finish;3538}35393540/*3541* A busy buffer can't be added to the retry list.3542* It needs to be cloned.3543*/3544if (bf->bf_flags & ATH_BUF_BUSY) {3545nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);3546DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,3547"%s: busy buffer clone: %p -> %p\n",3548__func__, bf, nbf);3549} else {3550nbf = bf;3551}35523553if (nbf == NULL) {3554DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,3555"%s: busy buffer couldn't be cloned (%p)!\n",3556__func__, bf);3557retval = 1; /* error */3558} else {3559ath_tx_tid_filt_comp_buf(sc, tid, nbf);3560retval = 0; /* ok */3561}3562finish:3563ath_tx_tid_filt_comp_complete(sc, tid);35643565return (retval);3566}35673568static void3569ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,3570struct ath_buf *bf_first, ath_bufhead *bf_q)3571{3572struct ath_buf *bf, *bf_next, *nbf;35733574ATH_TX_LOCK_ASSERT(sc);35753576bf = bf_first;3577while (bf) {3578bf_next = bf->bf_next;3579bf->bf_next = NULL; /* Remove it from the aggr list */35803581/*3582* Don't allow a filtered frame to live forever.3583*/3584if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {3585sc->sc_stats.ast_tx_swretrymax++;3586DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,3587"%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",3588__func__,3589tid->tid,3590bf,3591SEQNO(bf->bf_state.bfs_seqno));3592TAILQ_INSERT_TAIL(bf_q, bf, bf_list);3593goto next;3594}35953596if (bf->bf_flags & ATH_BUF_BUSY) {3597nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);3598DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,3599"%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",3600__func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));3601} else {3602nbf = bf;3603}36043605/*3606* If the buffer couldn't be cloned, add it to bf_q;3607* the caller will free the buffer(s) as required.3608*/3609if (nbf == NULL) {3610DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,3611"%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",3612__func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));3613TAILQ_INSERT_TAIL(bf_q, bf, bf_list);3614} else {3615ath_tx_tid_filt_comp_buf(sc, tid, nbf);3616}3617next:3618bf = bf_next;3619}36203621ath_tx_tid_filt_comp_complete(sc, tid);3622}36233624/*3625* Suspend the queue because we need to TX a BAR.3626*/3627static void3628ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)3629{36303631ATH_TX_LOCK_ASSERT(sc);36323633DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3634"%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",3635__func__,3636tid->tid,3637tid->bar_wait,3638tid->bar_tx);36393640/* We shouldn't be called when bar_tx is 1 */3641if (tid->bar_tx) {3642DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3643"%s: bar_tx is 1?!\n", __func__);3644}36453646/* If we've already been called, just be patient. */3647if (tid->bar_wait)3648return;36493650/* Wait! */3651tid->bar_wait = 1;36523653/* Only one pause, no matter how many frames fail */3654ath_tx_tid_pause(sc, tid);3655}36563657/*3658* We've finished with BAR handling - either we succeeded or3659* failed. Either way, unsuspend TX.3660*/3661static void3662ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)3663{36643665ATH_TX_LOCK_ASSERT(sc);36663667DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3668"%s: %6D: TID=%d, called\n",3669__func__,3670tid->an->an_node.ni_macaddr,3671":",3672tid->tid);36733674if (tid->bar_tx == 0 || tid->bar_wait == 0) {3675DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3676"%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",3677__func__, tid->an->an_node.ni_macaddr, ":",3678tid->tid, tid->bar_tx, tid->bar_wait);3679}36803681tid->bar_tx = tid->bar_wait = 0;3682ath_tx_tid_resume(sc, tid);3683}36843685/*3686* Return whether we're ready to TX a BAR frame.3687*3688* Requires the TID lock be held.3689*/3690static int3691ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)3692{36933694ATH_TX_LOCK_ASSERT(sc);36953696if (tid->bar_wait == 0 || tid->hwq_depth > 0)3697return (0);36983699DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3700"%s: %6D: TID=%d, bar ready\n",3701__func__,3702tid->an->an_node.ni_macaddr,3703":",3704tid->tid);37053706return (1);3707}37083709/*3710* Check whether the current TID is ready to have a BAR3711* TXed and if so, do the TX.3712*3713* Since the TID/TXQ lock can't be held during a call to3714* ieee80211_send_bar(), we have to do the dirty thing of unlocking it,3715* sending the BAR and locking it again.3716*3717* Eventually, the code to send the BAR should be broken out3718* from this routine so the lock doesn't have to be reacquired3719* just to be immediately dropped by the caller.3720*/3721static void3722ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)3723{3724struct ieee80211_tx_ampdu *tap;37253726ATH_TX_LOCK_ASSERT(sc);37273728DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3729"%s: %6D: TID=%d, called\n",3730__func__,3731tid->an->an_node.ni_macaddr,3732":",3733tid->tid);37343735tap = ath_tx_get_tx_tid(tid->an, tid->tid);37363737/*3738* This is an error condition!3739*/3740if (tid->bar_wait == 0 || tid->bar_tx == 1) {3741DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3742"%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",3743__func__, tid->an->an_node.ni_macaddr, ":",3744tid->tid, tid->bar_tx, tid->bar_wait);3745return;3746}37473748/* Don't do anything if we still have pending frames */3749if (tid->hwq_depth > 0) {3750DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3751"%s: %6D: TID=%d, hwq_depth=%d, waiting\n",3752__func__,3753tid->an->an_node.ni_macaddr,3754":",3755tid->tid,3756tid->hwq_depth);3757return;3758}37593760/* We're now about to TX */3761tid->bar_tx = 1;37623763/*3764* Override the clrdmask configuration for the next frame,3765* just to get the ball rolling.3766*/3767ath_tx_set_clrdmask(sc, tid->an);37683769/*3770* Calculate new BAW left edge, now that all frames have either3771* succeeded or failed.3772*3773* XXX verify this is _actually_ the valid value to begin at!3774*/3775DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3776"%s: %6D: TID=%d, new BAW left edge=%d\n",3777__func__,3778tid->an->an_node.ni_macaddr,3779":",3780tid->tid,3781tap->txa_start);37823783/* Try sending the BAR frame */3784/* We can't hold the lock here! */37853786ATH_TX_UNLOCK(sc);3787if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {3788/* Success? Now we wait for notification that it's done */3789ATH_TX_LOCK(sc);3790return;3791}37923793/* Failure? For now, warn loudly and continue */3794ATH_TX_LOCK(sc);3795DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,3796"%s: %6D: TID=%d, failed to TX BAR, continue!\n",3797__func__, tid->an->an_node.ni_macaddr, ":",3798tid->tid);3799ath_tx_tid_bar_unsuspend(sc, tid);3800}38013802static void3803ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,3804struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)3805{38063807ATH_TX_LOCK_ASSERT(sc);38083809/*3810* If the current TID is running AMPDU, update3811* the BAW.3812*/3813if (ath_tx_ampdu_running(sc, an, tid->tid) &&3814bf->bf_state.bfs_dobaw) {3815/*3816* Only remove the frame from the BAW if it's3817* been transmitted at least once; this means3818* the frame was in the BAW to begin with.3819*/3820if (bf->bf_state.bfs_retries > 0) {3821ath_tx_update_baw(sc, an, tid, bf);3822bf->bf_state.bfs_dobaw = 0;3823}3824#if 03825/*3826* This has become a non-fatal error now3827*/3828if (! bf->bf_state.bfs_addedbaw)3829DPRINTF(sc, ATH_DEBUG_SW_TX_BAW3830"%s: wasn't added: seqno %d\n",3831__func__, SEQNO(bf->bf_state.bfs_seqno));3832#endif3833}38343835/* Strip it out of an aggregate list if it was in one */3836bf->bf_next = NULL;38373838/* Insert on the free queue to be freed by the caller */3839TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);3840}38413842static void3843ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,3844const char *pfx, struct ath_tid *tid, struct ath_buf *bf)3845{3846struct ieee80211_node *ni = &an->an_node;3847struct ath_txq *txq;3848struct ieee80211_tx_ampdu *tap;38493850txq = sc->sc_ac2q[tid->ac];3851tap = ath_tx_get_tx_tid(an, tid->tid);38523853DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,3854"%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "3855"seqno=%d, retry=%d\n",3856__func__,3857pfx,3858ni->ni_macaddr,3859":",3860bf,3861bf->bf_state.bfs_addedbaw,3862bf->bf_state.bfs_dobaw,3863SEQNO(bf->bf_state.bfs_seqno),3864bf->bf_state.bfs_retries);3865DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,3866"%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",3867__func__,3868pfx,3869ni->ni_macaddr,3870":",3871bf,3872txq->axq_qnum,3873txq->axq_depth,3874txq->axq_aggr_depth);3875DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,3876"%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "3877"isfiltered=%d\n",3878__func__,3879pfx,3880ni->ni_macaddr,3881":",3882bf,3883tid->axq_depth,3884tid->hwq_depth,3885tid->bar_wait,3886tid->isfiltered);3887DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,3888"%s: %s: %6D: tid %d: "3889"sched=%d, paused=%d, "3890"incomp=%d, baw_head=%d, "3891"baw_tail=%d txa_start=%d, ni_txseqs=%d\n",3892__func__,3893pfx,3894ni->ni_macaddr,3895":",3896tid->tid,3897tid->sched, tid->paused,3898tid->incomp, tid->baw_head,3899tid->baw_tail, tap == NULL ? -1 : tap->txa_start,3900ni->ni_txseqs[tid->tid]);39013902/* XXX Dump the frame, see what it is? */3903if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))3904ieee80211_dump_pkt(ni->ni_ic,3905mtod(bf->bf_m, const uint8_t *),3906bf->bf_m->m_len, 0, -1);3907}39083909/*3910* Free any packets currently pending in the software TX queue.3911*3912* This will be called when a node is being deleted.3913*3914* It can also be called on an active node during an interface3915* reset or state transition.3916*3917* (From Linux/reference):3918*3919* TODO: For frame(s) that are in the retry state, we will reuse the3920* sequence number(s) without setting the retry bit. The3921* alternative is to give up on these and BAR the receiver's window3922* forward.3923*/3924static void3925ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,3926struct ath_tid *tid, ath_bufhead *bf_cq)3927{3928struct ath_buf *bf;3929struct ieee80211_tx_ampdu *tap;3930struct ieee80211_node *ni = &an->an_node;3931int t;39323933tap = ath_tx_get_tx_tid(an, tid->tid);39343935ATH_TX_LOCK_ASSERT(sc);39363937/* Walk the queue, free frames */3938t = 0;3939for (;;) {3940bf = ATH_TID_FIRST(tid);3941if (bf == NULL) {3942break;3943}39443945if (t == 0) {3946ath_tx_tid_drain_print(sc, an, "norm", tid, bf);3947// t = 1;3948}39493950ATH_TID_REMOVE(tid, bf, bf_list);3951ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);3952}39533954/* And now, drain the filtered frame queue */3955t = 0;3956for (;;) {3957bf = ATH_TID_FILT_FIRST(tid);3958if (bf == NULL)3959break;39603961if (t == 0) {3962ath_tx_tid_drain_print(sc, an, "filt", tid, bf);3963// t = 1;3964}39653966ATH_TID_FILT_REMOVE(tid, bf, bf_list);3967ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);3968}39693970/*3971* Override the clrdmask configuration for the next frame3972* in case there is some future transmission, just to get3973* the ball rolling.3974*3975* This won't hurt things if the TID is about to be freed.3976*/3977ath_tx_set_clrdmask(sc, tid->an);39783979/*3980* Now that it's completed, grab the TID lock and update3981* the sequence number and BAW window.3982* Because sequence numbers have been assigned to frames3983* that haven't been sent yet, it's entirely possible3984* we'll be called with some pending frames that have not3985* been transmitted.3986*3987* The cleaner solution is to do the sequence number allocation3988* when the packet is first transmitted - and thus the "retries"3989* check above would be enough to update the BAW/seqno.3990*/39913992/* But don't do it for non-QoS TIDs */3993if (tap) {3994#if 13995DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,3996"%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",3997__func__,3998ni->ni_macaddr,3999":",4000an,4001tid->tid,4002tap->txa_start);4003#endif4004ni->ni_txseqs[tid->tid] = tap->txa_start;4005tid->baw_tail = tid->baw_head;4006}4007}40084009/*4010* Reset the TID state. This must be only called once the node has4011* had its frames flushed from this TID, to ensure that no other4012* pause / unpause logic can kick in.4013*/4014static void4015ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)4016{40174018#if 04019tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;4020tid->paused = tid->sched = tid->addba_tx_pending = 0;4021tid->incomp = tid->cleanup_inprogress = 0;4022#endif40234024/*4025* If we have a bar_wait set, we need to unpause the TID4026* here. Otherwise once cleanup has finished, the TID won't4027* have the right paused counter.4028*4029* XXX I'm not going through resume here - I don't want the4030* node to be rescheuled just yet. This however should be4031* methodized!4032*/4033if (tid->bar_wait) {4034if (tid->paused > 0) {4035tid->paused --;4036}4037}40384039/*4040* XXX same with a currently filtered TID.4041*4042* Since this is being called during a flush, we assume that4043* the filtered frame list is actually empty.4044*4045* XXX TODO: add in a check to ensure that the filtered queue4046* depth is actually 0!4047*/4048if (tid->isfiltered) {4049if (tid->paused > 0) {4050tid->paused --;4051}4052}40534054/*4055* Clear BAR, filtered frames, scheduled and ADDBA pending.4056* The TID may be going through cleanup from the last association4057* where things in the BAW are still in the hardware queue.4058*/4059tid->bar_wait = 0;4060tid->bar_tx = 0;4061tid->isfiltered = 0;4062tid->sched = 0;4063tid->addba_tx_pending = 0;40644065/*4066* XXX TODO: it may just be enough to walk the HWQs and mark4067* frames for that node as non-aggregate; or mark the ath_node4068* with something that indicates that aggregation is no longer4069* occurring. Then we can just toss the BAW complaints and4070* do a complete hard reset of state here - no pause, no4071* complete counter, etc.4072*/40734074}40754076/*4077* Flush all software queued packets for the given node.4078*4079* This occurs when a completion handler frees the last buffer4080* for a node, and the node is thus freed. This causes the node4081* to be cleaned up, which ends up calling ath_tx_node_flush.4082*/4083void4084ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)4085{4086int tid;4087ath_bufhead bf_cq;4088struct ath_buf *bf;40894090TAILQ_INIT(&bf_cq);40914092ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",4093&an->an_node);40944095ATH_TX_LOCK(sc);4096DPRINTF(sc, ATH_DEBUG_NODE,4097"%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "4098"swq_depth=%d, clrdmask=%d, leak_count=%d\n",4099__func__,4100an->an_node.ni_macaddr,4101":",4102an->an_is_powersave,4103an->an_stack_psq,4104an->an_tim_set,4105an->an_swq_depth,4106an->clrdmask,4107an->an_leak_count);41084109for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {4110struct ath_tid *atid = &an->an_tid[tid];41114112/* Free packets */4113ath_tx_tid_drain(sc, an, atid, &bf_cq);41144115/* Remove this tid from the list of active tids */4116ath_tx_tid_unsched(sc, atid);41174118/* Reset the per-TID pause, BAR, etc state */4119ath_tx_tid_reset(sc, atid);4120}41214122/*4123* Clear global leak count4124*/4125an->an_leak_count = 0;4126ATH_TX_UNLOCK(sc);41274128/* Handle completed frames */4129while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {4130TAILQ_REMOVE(&bf_cq, bf, bf_list);4131ath_tx_default_comp(sc, bf, 0);4132}4133}41344135/*4136* Drain all the software TXQs currently with traffic queued.4137*/4138void4139ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)4140{4141struct ath_tid *tid;4142ath_bufhead bf_cq;4143struct ath_buf *bf;41444145TAILQ_INIT(&bf_cq);4146ATH_TX_LOCK(sc);41474148/*4149* Iterate over all active tids for the given txq,4150* flushing and unsched'ing them4151*/4152while (! TAILQ_EMPTY(&txq->axq_tidq)) {4153tid = TAILQ_FIRST(&txq->axq_tidq);4154ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);4155ath_tx_tid_unsched(sc, tid);4156}41574158ATH_TX_UNLOCK(sc);41594160while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {4161TAILQ_REMOVE(&bf_cq, bf, bf_list);4162ath_tx_default_comp(sc, bf, 0);4163}4164}41654166/*4167* Handle completion of non-aggregate session frames.4168*4169* This (currently) doesn't implement software retransmission of4170* non-aggregate frames!4171*4172* Software retransmission of non-aggregate frames needs to obey4173* the strict sequence number ordering, and drop any frames that4174* will fail this.4175*4176* For now, filtered frames and frame transmission will cause4177* all kinds of issues. So we don't support them.4178*4179* So anyone queuing frames via ath_tx_normal_xmit() or4180* ath_tx_hw_queue_norm() must override and set CLRDMASK.4181*/4182void4183ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)4184{4185struct ieee80211_node *ni = bf->bf_node;4186struct ath_node *an = ATH_NODE(ni);4187int tid = bf->bf_state.bfs_tid;4188struct ath_tid *atid = &an->an_tid[tid];4189struct ath_tx_status *ts = &bf->bf_status.ds_txstat;41904191/* The TID state is protected behind the TXQ lock */4192ATH_TX_LOCK(sc);41934194DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",4195__func__, bf, fail, atid->hwq_depth - 1);41964197atid->hwq_depth--;41984199#if 04200/*4201* If the frame was filtered, stick it on the filter frame4202* queue and complain about it. It shouldn't happen!4203*/4204if ((ts->ts_status & HAL_TXERR_FILT) ||4205(ts->ts_status != 0 && atid->isfiltered)) {4206DPRINTF(sc, ATH_DEBUG_SW_TX,4207"%s: isfiltered=%d, ts_status=%d: huh?\n",4208__func__,4209atid->isfiltered,4210ts->ts_status);4211ath_tx_tid_filt_comp_buf(sc, atid, bf);4212}4213#endif4214if (atid->isfiltered)4215DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);4216if (atid->hwq_depth < 0)4217DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",4218__func__, atid->hwq_depth);42194220/* If the TID is being cleaned up, track things */4221/* XXX refactor! */4222if (atid->cleanup_inprogress) {4223atid->incomp--;4224if (atid->incomp == 0) {4225DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,4226"%s: TID %d: cleaned up! resume!\n",4227__func__, tid);4228atid->cleanup_inprogress = 0;4229ath_tx_tid_resume(sc, atid);4230}4231}42324233/*4234* If the queue is filtered, potentially mark it as complete4235* and reschedule it as needed.4236*4237* This is required as there may be a subsequent TX descriptor4238* for this end-node that has CLRDMASK set, so it's quite possible4239* that a filtered frame will be followed by a non-filtered4240* (complete or otherwise) frame.4241*4242* XXX should we do this before we complete the frame?4243*/4244if (atid->isfiltered)4245ath_tx_tid_filt_comp_complete(sc, atid);4246ATH_TX_UNLOCK(sc);42474248/*4249* punt to rate control if we're not being cleaned up4250* during a hw queue drain and the frame wanted an ACK.4251*/4252if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))4253ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,4254ts,4255bf->bf_state.bfs_pktlen,4256bf->bf_state.bfs_pktlen,42571, (ts->ts_status == 0) ? 0 : 1);42584259ath_tx_default_comp(sc, bf, fail);4260}42614262/*4263* Handle cleanup of aggregate session packets that aren't4264* an A-MPDU.4265*4266* There's no need to update the BAW here - the session is being4267* torn down.4268*/4269static void4270ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)4271{4272struct ieee80211_node *ni = bf->bf_node;4273struct ath_node *an = ATH_NODE(ni);4274int tid = bf->bf_state.bfs_tid;4275struct ath_tid *atid = &an->an_tid[tid];42764277DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",4278__func__, tid, atid->incomp);42794280ATH_TX_LOCK(sc);4281atid->incomp--;42824283/* XXX refactor! */4284if (bf->bf_state.bfs_dobaw) {4285ath_tx_update_baw(sc, an, atid, bf);4286if (!bf->bf_state.bfs_addedbaw)4287DPRINTF(sc, ATH_DEBUG_SW_TX,4288"%s: wasn't added: seqno %d\n",4289__func__, SEQNO(bf->bf_state.bfs_seqno));4290}42914292if (atid->incomp == 0) {4293DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,4294"%s: TID %d: cleaned up! resume!\n",4295__func__, tid);4296atid->cleanup_inprogress = 0;4297ath_tx_tid_resume(sc, atid);4298}4299ATH_TX_UNLOCK(sc);43004301ath_tx_default_comp(sc, bf, 0);4302}43034304/*4305* This as it currently stands is a bit dumb. Ideally we'd just4306* fail the frame the normal way and have it permanently fail4307* via the normal aggregate completion path.4308*/4309static void4310ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,4311int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)4312{4313struct ath_tid *atid = &an->an_tid[tid];4314struct ath_buf *bf, *bf_next;43154316ATH_TX_LOCK_ASSERT(sc);43174318/*4319* Remove this frame from the queue.4320*/4321ATH_TID_REMOVE(atid, bf_head, bf_list);43224323/*4324* Loop over all the frames in the aggregate.4325*/4326bf = bf_head;4327while (bf != NULL) {4328bf_next = bf->bf_next; /* next aggregate frame, or NULL */43294330/*4331* If it's been added to the BAW we need to kick4332* it out of the BAW before we continue.4333*4334* XXX if it's an aggregate, assert that it's in the4335* BAW - we shouldn't have it be in an aggregate4336* otherwise!4337*/4338if (bf->bf_state.bfs_addedbaw) {4339ath_tx_update_baw(sc, an, atid, bf);4340bf->bf_state.bfs_dobaw = 0;4341}43424343/*4344* Give it the default completion handler.4345*/4346bf->bf_comp = ath_tx_normal_comp;4347bf->bf_next = NULL;43484349/*4350* Add it to the list to free.4351*/4352TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);43534354/*4355* Now advance to the next frame in the aggregate.4356*/4357bf = bf_next;4358}4359}43604361/*4362* Performs transmit side cleanup when TID changes from aggregated to4363* unaggregated and during reassociation.4364*4365* For now, this just tosses everything from the TID software queue4366* whether or not it has been retried and marks the TID as4367* pending completion if there's anything for this TID queued to4368* the hardware.4369*4370* The caller is responsible for pausing the TID and unpausing the4371* TID if no cleanup was required. Otherwise the cleanup path will4372* unpause the TID once the last hardware queued frame is completed.4373*/4374static void4375ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,4376ath_bufhead *bf_cq)4377{4378struct ath_tid *atid = &an->an_tid[tid];4379struct ath_buf *bf, *bf_next;43804381ATH_TX_LOCK_ASSERT(sc);43824383DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,4384"%s: TID %d: called; inprogress=%d\n", __func__, tid,4385atid->cleanup_inprogress);43864387/*4388* Move the filtered frames to the TX queue, before4389* we run off and discard/process things.4390*/43914392/* XXX this is really quite inefficient */4393while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {4394ATH_TID_FILT_REMOVE(atid, bf, bf_list);4395ATH_TID_INSERT_HEAD(atid, bf, bf_list);4396}43974398/*4399* Update the frames in the software TX queue:4400*4401* + Discard retry frames in the queue4402* + Fix the completion function to be non-aggregate4403*/4404bf = ATH_TID_FIRST(atid);4405while (bf) {4406/*4407* Grab the next frame in the list, we may4408* be fiddling with the list.4409*/4410bf_next = TAILQ_NEXT(bf, bf_list);44114412/*4413* Free the frame and all subframes.4414*/4415ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);44164417/*4418* Next frame!4419*/4420bf = bf_next;4421}44224423/*4424* If there's anything in the hardware queue we wait4425* for the TID HWQ to empty.4426*/4427if (atid->hwq_depth > 0) {4428/*4429* XXX how about we kill atid->incomp, and instead4430* replace it with a macro that checks that atid->hwq_depth4431* is 0?4432*/4433atid->incomp = atid->hwq_depth;4434atid->cleanup_inprogress = 1;4435}44364437if (atid->cleanup_inprogress)4438DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,4439"%s: TID %d: cleanup needed: %d packets\n",4440__func__, tid, atid->incomp);44414442/* Owner now must free completed frames */4443}44444445static struct ath_buf *4446ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,4447struct ath_tid *tid, struct ath_buf *bf)4448{4449struct ath_buf *nbf;4450int error;44514452/*4453* Clone the buffer. This will handle the dma unmap and4454* copy the node reference to the new buffer. If this4455* works out, 'bf' will have no DMA mapping, no mbuf4456* pointer and no node reference.4457*/4458nbf = ath_buf_clone(sc, bf);44594460#if 04461DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",4462__func__);4463#endif44644465if (nbf == NULL) {4466/* Failed to clone */4467DPRINTF(sc, ATH_DEBUG_XMIT,4468"%s: failed to clone a busy buffer\n",4469__func__);4470return NULL;4471}44724473/* Setup the dma for the new buffer */4474error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);4475if (error != 0) {4476DPRINTF(sc, ATH_DEBUG_XMIT,4477"%s: failed to setup dma for clone\n",4478__func__);4479/*4480* Put this at the head of the list, not tail;4481* that way it doesn't interfere with the4482* busy buffer logic (which uses the tail of4483* the list.)4484*/4485ATH_TXBUF_LOCK(sc);4486ath_returnbuf_head(sc, nbf);4487ATH_TXBUF_UNLOCK(sc);4488return NULL;4489}44904491/* Update BAW if required, before we free the original buf */4492if (bf->bf_state.bfs_dobaw)4493ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);44944495/* Free original buffer; return new buffer */4496ath_freebuf(sc, bf);44974498return nbf;4499}45004501/*4502* Handle retrying an unaggregate frame in an aggregate4503* session.4504*4505* If too many retries occur, pause the TID, wait for4506* any further retransmits (as there's no reason why4507* non-aggregate frames in an aggregate session are4508* transmitted in-order; they just have to be in-BAW)4509* and then queue a BAR.4510*/4511static void4512ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)4513{4514struct ieee80211_node *ni = bf->bf_node;4515struct ath_node *an = ATH_NODE(ni);4516int tid = bf->bf_state.bfs_tid;4517struct ath_tid *atid = &an->an_tid[tid];4518struct ieee80211_tx_ampdu *tap;45194520ATH_TX_LOCK(sc);45214522tap = ath_tx_get_tx_tid(an, tid);45234524/*4525* If the buffer is marked as busy, we can't directly4526* reuse it. Instead, try to clone the buffer.4527* If the clone is successful, recycle the old buffer.4528* If the clone is unsuccessful, set bfs_retries to max4529* to force the next bit of code to free the buffer4530* for us.4531*/4532if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&4533(bf->bf_flags & ATH_BUF_BUSY)) {4534struct ath_buf *nbf;4535nbf = ath_tx_retry_clone(sc, an, atid, bf);4536if (nbf)4537/* bf has been freed at this point */4538bf = nbf;4539else4540bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;4541}45424543if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {4544DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,4545"%s: exceeded retries; seqno %d\n",4546__func__, SEQNO(bf->bf_state.bfs_seqno));4547sc->sc_stats.ast_tx_swretrymax++;45484549/* Update BAW anyway */4550if (bf->bf_state.bfs_dobaw) {4551ath_tx_update_baw(sc, an, atid, bf);4552if (! bf->bf_state.bfs_addedbaw)4553DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,4554"%s: wasn't added: seqno %d\n",4555__func__, SEQNO(bf->bf_state.bfs_seqno));4556}4557bf->bf_state.bfs_dobaw = 0;45584559/* Suspend the TX queue and get ready to send the BAR */4560ath_tx_tid_bar_suspend(sc, atid);45614562/* Send the BAR if there are no other frames waiting */4563if (ath_tx_tid_bar_tx_ready(sc, atid))4564ath_tx_tid_bar_tx(sc, atid);45654566ATH_TX_UNLOCK(sc);45674568/* Free buffer, bf is free after this call */4569ath_tx_default_comp(sc, bf, 0);4570return;4571}45724573/*4574* This increments the retry counter as well as4575* sets the retry flag in the ath_buf and packet4576* body.4577*/4578ath_tx_set_retry(sc, bf);4579sc->sc_stats.ast_tx_swretries++;45804581/*4582* Insert this at the head of the queue, so it's4583* retried before any current/subsequent frames.4584*/4585ATH_TID_INSERT_HEAD(atid, bf, bf_list);4586ath_tx_tid_sched(sc, atid);4587/* Send the BAR if there are no other frames waiting */4588if (ath_tx_tid_bar_tx_ready(sc, atid))4589ath_tx_tid_bar_tx(sc, atid);45904591ATH_TX_UNLOCK(sc);4592}45934594/*4595* Common code for aggregate excessive retry/subframe retry.4596* If retrying, queues buffers to bf_q. If not, frees the4597* buffers.4598*4599* XXX should unify this with ath_tx_aggr_retry_unaggr()4600*/4601static int4602ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,4603ath_bufhead *bf_q)4604{4605struct ieee80211_node *ni = bf->bf_node;4606struct ath_node *an = ATH_NODE(ni);4607int tid = bf->bf_state.bfs_tid;4608struct ath_tid *atid = &an->an_tid[tid];46094610ATH_TX_LOCK_ASSERT(sc);46114612/* XXX clr11naggr should be done for all subframes */4613ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);4614ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);46154616/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */46174618/*4619* If the buffer is marked as busy, we can't directly4620* reuse it. Instead, try to clone the buffer.4621* If the clone is successful, recycle the old buffer.4622* If the clone is unsuccessful, set bfs_retries to max4623* to force the next bit of code to free the buffer4624* for us.4625*/4626if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&4627(bf->bf_flags & ATH_BUF_BUSY)) {4628struct ath_buf *nbf;4629nbf = ath_tx_retry_clone(sc, an, atid, bf);4630if (nbf)4631/* bf has been freed at this point */4632bf = nbf;4633else4634bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;4635}46364637if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {4638sc->sc_stats.ast_tx_swretrymax++;4639DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,4640"%s: max retries: seqno %d\n",4641__func__, SEQNO(bf->bf_state.bfs_seqno));4642ath_tx_update_baw(sc, an, atid, bf);4643if (!bf->bf_state.bfs_addedbaw)4644DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,4645"%s: wasn't added: seqno %d\n",4646__func__, SEQNO(bf->bf_state.bfs_seqno));4647bf->bf_state.bfs_dobaw = 0;4648return 1;4649}46504651ath_tx_set_retry(sc, bf);4652sc->sc_stats.ast_tx_swretries++;4653bf->bf_next = NULL; /* Just to make sure */46544655/* Clear the aggregate state */4656bf->bf_state.bfs_aggr = 0;4657bf->bf_state.bfs_ndelim = 0; /* ??? needed? */4658bf->bf_state.bfs_nframes = 1;46594660TAILQ_INSERT_TAIL(bf_q, bf, bf_list);4661return 0;4662}46634664/*4665* error pkt completion for an aggregate destination4666*/4667static void4668ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,4669struct ath_tid *tid)4670{4671struct ieee80211_node *ni = bf_first->bf_node;4672struct ath_node *an = ATH_NODE(ni);4673struct ath_buf *bf_next, *bf;4674ath_bufhead bf_q;4675int drops = 0;4676struct ieee80211_tx_ampdu *tap;4677ath_bufhead bf_cq;46784679TAILQ_INIT(&bf_q);4680TAILQ_INIT(&bf_cq);46814682/*4683* Update rate control - all frames have failed.4684*/4685ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,4686&bf_first->bf_status.ds_txstat,4687bf_first->bf_state.bfs_al,4688bf_first->bf_state.bfs_rc_maxpktlen,4689bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);46904691ATH_TX_LOCK(sc);4692tap = ath_tx_get_tx_tid(an, tid->tid);4693sc->sc_stats.ast_tx_aggr_failall++;46944695/* Retry all subframes */4696bf = bf_first;4697while (bf) {4698bf_next = bf->bf_next;4699bf->bf_next = NULL; /* Remove it from the aggr list */4700sc->sc_stats.ast_tx_aggr_fail++;4701if (ath_tx_retry_subframe(sc, bf, &bf_q)) {4702drops++;4703bf->bf_next = NULL;4704TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);4705}4706bf = bf_next;4707}47084709/* Prepend all frames to the beginning of the queue */4710while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {4711TAILQ_REMOVE(&bf_q, bf, bf_list);4712ATH_TID_INSERT_HEAD(tid, bf, bf_list);4713}47144715/*4716* Schedule the TID to be re-tried.4717*/4718ath_tx_tid_sched(sc, tid);47194720/*4721* send bar if we dropped any frames4722*4723* Keep the txq lock held for now, as we need to ensure4724* that ni_txseqs[] is consistent (as it's being updated4725* in the ifnet TX context or raw TX context.)4726*/4727if (drops) {4728/* Suspend the TX queue and get ready to send the BAR */4729ath_tx_tid_bar_suspend(sc, tid);4730}47314732/*4733* Send BAR if required4734*/4735if (ath_tx_tid_bar_tx_ready(sc, tid))4736ath_tx_tid_bar_tx(sc, tid);47374738ATH_TX_UNLOCK(sc);47394740/* Complete frames which errored out */4741while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {4742TAILQ_REMOVE(&bf_cq, bf, bf_list);4743ath_tx_default_comp(sc, bf, 0);4744}4745}47464747/*4748* Handle clean-up of packets from an aggregate list.4749*4750* There's no need to update the BAW here - the session is being4751* torn down.4752*/4753static void4754ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)4755{4756struct ath_buf *bf, *bf_next;4757struct ieee80211_node *ni = bf_first->bf_node;4758struct ath_node *an = ATH_NODE(ni);4759int tid = bf_first->bf_state.bfs_tid;4760struct ath_tid *atid = &an->an_tid[tid];47614762ATH_TX_LOCK(sc);47634764/* update incomp */4765atid->incomp--;47664767/* Update the BAW */4768bf = bf_first;4769while (bf) {4770/* XXX refactor! */4771if (bf->bf_state.bfs_dobaw) {4772ath_tx_update_baw(sc, an, atid, bf);4773if (!bf->bf_state.bfs_addedbaw)4774DPRINTF(sc, ATH_DEBUG_SW_TX,4775"%s: wasn't added: seqno %d\n",4776__func__, SEQNO(bf->bf_state.bfs_seqno));4777}4778bf = bf->bf_next;4779}47804781if (atid->incomp == 0) {4782DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,4783"%s: TID %d: cleaned up! resume!\n",4784__func__, tid);4785atid->cleanup_inprogress = 0;4786ath_tx_tid_resume(sc, atid);4787}47884789/* Send BAR if required */4790/* XXX why would we send a BAR when transitioning to non-aggregation? */4791/*4792* XXX TODO: we should likely just tear down the BAR state here,4793* rather than sending a BAR.4794*/4795if (ath_tx_tid_bar_tx_ready(sc, atid))4796ath_tx_tid_bar_tx(sc, atid);47974798ATH_TX_UNLOCK(sc);47994800/* Handle frame completion as individual frames */4801bf = bf_first;4802while (bf) {4803bf_next = bf->bf_next;4804bf->bf_next = NULL;4805ath_tx_default_comp(sc, bf, 1);4806bf = bf_next;4807}4808}48094810/*4811* Handle completion of an set of aggregate frames.4812*4813* Note: the completion handler is the last descriptor in the aggregate,4814* not the last descriptor in the first frame.4815*/4816static void4817ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,4818int fail)4819{4820//struct ath_desc *ds = bf->bf_lastds;4821struct ieee80211_node *ni = bf_first->bf_node;4822struct ath_node *an = ATH_NODE(ni);4823int tid = bf_first->bf_state.bfs_tid;4824struct ath_tid *atid = &an->an_tid[tid];4825struct ath_tx_status ts;4826struct ieee80211_tx_ampdu *tap;4827ath_bufhead bf_q;4828ath_bufhead bf_cq;4829int seq_st, tx_ok;4830int hasba, isaggr;4831uint32_t ba[2];4832struct ath_buf *bf, *bf_next;4833int ba_index;4834int drops = 0;4835int nframes = 0, nbad = 0, nf;4836int pktlen;4837int agglen, rc_agglen;4838/* XXX there's too much on the stack? */4839struct ath_rc_series rc[ATH_RC_NUM];4840int txseq;48414842DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",4843__func__, atid->hwq_depth);48444845/*4846* Take a copy; this may be needed -after- bf_first4847* has been completed and freed.4848*/4849ts = bf_first->bf_status.ds_txstat;4850agglen = bf_first->bf_state.bfs_al;4851rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;48524853TAILQ_INIT(&bf_q);4854TAILQ_INIT(&bf_cq);48554856/* The TID state is kept behind the TXQ lock */4857ATH_TX_LOCK(sc);48584859atid->hwq_depth--;4860if (atid->hwq_depth < 0)4861DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",4862__func__, atid->hwq_depth);48634864/*4865* If the TID is filtered, handle completing the filter4866* transition before potentially kicking it to the cleanup4867* function.4868*4869* XXX this is duplicate work, ew.4870*/4871if (atid->isfiltered)4872ath_tx_tid_filt_comp_complete(sc, atid);48734874/*4875* Punt cleanup to the relevant function, not our problem now4876*/4877if (atid->cleanup_inprogress) {4878if (atid->isfiltered)4879DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,4880"%s: isfiltered=1, normal_comp?\n",4881__func__);4882ATH_TX_UNLOCK(sc);4883ath_tx_comp_cleanup_aggr(sc, bf_first);4884return;4885}48864887/*4888* If the frame is filtered, transition to filtered frame4889* mode and add this to the filtered frame list.4890*4891* XXX TODO: figure out how this interoperates with4892* BAR, pause and cleanup states.4893*/4894if ((ts.ts_status & HAL_TXERR_FILT) ||4895(ts.ts_status != 0 && atid->isfiltered)) {4896if (fail != 0)4897DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,4898"%s: isfiltered=1, fail=%d\n", __func__, fail);4899ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);49004901/* Remove from BAW */4902TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {4903if (bf->bf_state.bfs_addedbaw)4904drops++;4905if (bf->bf_state.bfs_dobaw) {4906ath_tx_update_baw(sc, an, atid, bf);4907if (!bf->bf_state.bfs_addedbaw)4908DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,4909"%s: wasn't added: seqno %d\n",4910__func__,4911SEQNO(bf->bf_state.bfs_seqno));4912}4913bf->bf_state.bfs_dobaw = 0;4914}4915/*4916* If any intermediate frames in the BAW were dropped when4917* handling filtering things, send a BAR.4918*/4919if (drops)4920ath_tx_tid_bar_suspend(sc, atid);49214922/*4923* Finish up by sending a BAR if required and freeing4924* the frames outside of the TX lock.4925*/4926goto finish_send_bar;4927}49284929/*4930* XXX for now, use the first frame in the aggregate for4931* XXX rate control completion; it's at least consistent.4932*/4933pktlen = bf_first->bf_state.bfs_pktlen;49344935/*4936* Handle errors first!4937*4938* Here, handle _any_ error as a "exceeded retries" error.4939* Later on (when filtered frames are to be specially handled)4940* it'll have to be expanded.4941*/4942#if 04943if (ts.ts_status & HAL_TXERR_XRETRY) {4944#endif4945if (ts.ts_status != 0) {4946ATH_TX_UNLOCK(sc);4947ath_tx_comp_aggr_error(sc, bf_first, atid);4948return;4949}49504951tap = ath_tx_get_tx_tid(an, tid);49524953/*4954* extract starting sequence and block-ack bitmap4955*/4956/* XXX endian-ness of seq_st, ba? */4957seq_st = ts.ts_seqnum;4958hasba = !! (ts.ts_flags & HAL_TX_BA);4959tx_ok = (ts.ts_status == 0);4960isaggr = bf_first->bf_state.bfs_aggr;4961ba[0] = ts.ts_ba_low;4962ba[1] = ts.ts_ba_high;49634964/*4965* Copy the TX completion status and the rate control4966* series from the first descriptor, as it may be freed4967* before the rate control code can get its grubby fingers4968* into things.4969*/4970memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));49714972DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,4973"%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "4974"isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",4975__func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,4976isaggr, seq_st, hasba, ba[0], ba[1]);49774978/*4979* The reference driver doesn't do this; it simply ignores4980* this check in its entirety.4981*4982* I've seen this occur when using iperf to send traffic4983* out tid 1 - the aggregate frames are all marked as TID 1,4984* but the TXSTATUS has TID=0. So, let's just ignore this4985* check.4986*/4987#if 04988/* Occasionally, the MAC sends a tx status for the wrong TID. */4989if (tid != ts.ts_tid) {4990DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",4991__func__, tid, ts.ts_tid);4992tx_ok = 0;4993}4994#endif49954996/* AR5416 BA bug; this requires an interface reset */4997if (isaggr && tx_ok && (! hasba)) {4998device_printf(sc->sc_dev,4999"%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "5000"seq_st=%d\n",5001__func__, hasba, tx_ok, isaggr, seq_st);5002taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);5003/* And as we can't really trust the BA here .. */5004ba[0] = 0;5005ba[1] = 0;5006seq_st = 0;5007#ifdef ATH_DEBUG5008ath_printtxbuf(sc, bf_first,5009sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);5010#endif5011}50125013/*5014* Walk the list of frames, figure out which ones were correctly5015* sent and which weren't.5016*/5017bf = bf_first;5018nf = bf_first->bf_state.bfs_nframes;50195020/* bf_first is going to be invalid once this list is walked */5021bf_first = NULL;50225023/*5024* Walk the list of completed frames and determine5025* which need to be completed and which need to be5026* retransmitted.5027*5028* For completed frames, the completion functions need5029* to be called at the end of this function as the last5030* node reference may free the node.5031*5032* Finally, since the TXQ lock can't be held during the5033* completion callback (to avoid lock recursion),5034* the completion calls have to be done outside of the5035* lock.5036*/5037while (bf) {5038nframes++;5039ba_index = ATH_BA_INDEX(seq_st,5040SEQNO(bf->bf_state.bfs_seqno));5041bf_next = bf->bf_next;5042bf->bf_next = NULL; /* Remove it from the aggr list */50435044DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5045"%s: checking bf=%p seqno=%d; ack=%d\n",5046__func__, bf, SEQNO(bf->bf_state.bfs_seqno),5047ATH_BA_ISSET(ba, ba_index));50485049if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {5050sc->sc_stats.ast_tx_aggr_ok++;5051ath_tx_update_baw(sc, an, atid, bf);5052bf->bf_state.bfs_dobaw = 0;5053if (!bf->bf_state.bfs_addedbaw)5054DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5055"%s: wasn't added: seqno %d\n",5056__func__, SEQNO(bf->bf_state.bfs_seqno));5057bf->bf_next = NULL;5058TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);5059} else {5060sc->sc_stats.ast_tx_aggr_fail++;5061if (ath_tx_retry_subframe(sc, bf, &bf_q)) {5062drops++;5063bf->bf_next = NULL;5064TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);5065}5066nbad++;5067}5068bf = bf_next;5069}50705071/*5072* Now that the BAW updates have been done, unlock5073*5074* txseq is grabbed before the lock is released so we5075* have a consistent view of what -was- in the BAW.5076* Anything after this point will not yet have been5077* TXed.5078*/5079txseq = tap->txa_start;5080ATH_TX_UNLOCK(sc);50815082if (nframes != nf)5083DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5084"%s: num frames seen=%d; bf nframes=%d\n",5085__func__, nframes, nf);50865087/*5088* Now we know how many frames were bad, call the rate5089* control code.5090*/5091if (fail == 0) {5092ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen,5093nframes, nbad);5094}50955096/*5097* send bar if we dropped any frames5098*/5099if (drops) {5100/* Suspend the TX queue and get ready to send the BAR */5101ATH_TX_LOCK(sc);5102ath_tx_tid_bar_suspend(sc, atid);5103ATH_TX_UNLOCK(sc);5104}51055106DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5107"%s: txa_start now %d\n", __func__, tap->txa_start);51085109ATH_TX_LOCK(sc);51105111/* Prepend all frames to the beginning of the queue */5112while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {5113TAILQ_REMOVE(&bf_q, bf, bf_list);5114ATH_TID_INSERT_HEAD(atid, bf, bf_list);5115}51165117/*5118* Reschedule to grab some further frames.5119*/5120ath_tx_tid_sched(sc, atid);51215122/*5123* If the queue is filtered, re-schedule as required.5124*5125* This is required as there may be a subsequent TX descriptor5126* for this end-node that has CLRDMASK set, so it's quite possible5127* that a filtered frame will be followed by a non-filtered5128* (complete or otherwise) frame.5129*5130* XXX should we do this before we complete the frame?5131*/5132if (atid->isfiltered)5133ath_tx_tid_filt_comp_complete(sc, atid);51345135finish_send_bar:51365137/*5138* Send BAR if required5139*/5140if (ath_tx_tid_bar_tx_ready(sc, atid))5141ath_tx_tid_bar_tx(sc, atid);51425143ATH_TX_UNLOCK(sc);51445145/* Do deferred completion */5146while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {5147TAILQ_REMOVE(&bf_cq, bf, bf_list);5148ath_tx_default_comp(sc, bf, 0);5149}5150}51515152/*5153* Handle completion of unaggregated frames in an ADDBA5154* session.5155*5156* Fail is set to 1 if the entry is being freed via a call to5157* ath_tx_draintxq().5158*/5159static void5160ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)5161{5162struct ieee80211_node *ni = bf->bf_node;5163struct ath_node *an = ATH_NODE(ni);5164int tid = bf->bf_state.bfs_tid;5165struct ath_tid *atid = &an->an_tid[tid];5166struct ath_tx_status ts;5167int drops = 0;51685169/*5170* Take a copy of this; filtering/cloning the frame may free the5171* bf pointer.5172*/5173ts = bf->bf_status.ds_txstat;51745175/*5176* Update rate control status here, before we possibly5177* punt to retry or cleanup.5178*5179* Do it outside of the TXQ lock.5180*/5181if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))5182ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,5183&bf->bf_status.ds_txstat,5184bf->bf_state.bfs_pktlen,5185bf->bf_state.bfs_pktlen,51861, (ts.ts_status == 0) ? 0 : 1);51875188/*5189* This is called early so atid->hwq_depth can be tracked.5190* This unfortunately means that it's released and regrabbed5191* during retry and cleanup. That's rather inefficient.5192*/5193ATH_TX_LOCK(sc);51945195if (tid == IEEE80211_NONQOS_TID)5196DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);51975198DPRINTF(sc, ATH_DEBUG_SW_TX,5199"%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",5200__func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,5201SEQNO(bf->bf_state.bfs_seqno));52025203atid->hwq_depth--;5204if (atid->hwq_depth < 0)5205DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",5206__func__, atid->hwq_depth);52075208/*5209* If the TID is filtered, handle completing the filter5210* transition before potentially kicking it to the cleanup5211* function.5212*/5213if (atid->isfiltered)5214ath_tx_tid_filt_comp_complete(sc, atid);52155216/*5217* If a cleanup is in progress, punt to comp_cleanup;5218* rather than handling it here. It's thus their5219* responsibility to clean up, call the completion5220* function in net80211, etc.5221*/5222if (atid->cleanup_inprogress) {5223if (atid->isfiltered)5224DPRINTF(sc, ATH_DEBUG_SW_TX,5225"%s: isfiltered=1, normal_comp?\n",5226__func__);5227ATH_TX_UNLOCK(sc);5228DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",5229__func__);5230ath_tx_comp_cleanup_unaggr(sc, bf);5231return;5232}52335234/*5235* XXX TODO: how does cleanup, BAR and filtered frame handling5236* overlap?5237*5238* If the frame is filtered OR if it's any failure but5239* the TID is filtered, the frame must be added to the5240* filtered frame list.5241*5242* However - a busy buffer can't be added to the filtered5243* list as it will end up being recycled without having5244* been made available for the hardware.5245*/5246if ((ts.ts_status & HAL_TXERR_FILT) ||5247(ts.ts_status != 0 && atid->isfiltered)) {5248int freeframe;52495250if (fail != 0)5251DPRINTF(sc, ATH_DEBUG_SW_TX,5252"%s: isfiltered=1, fail=%d\n",5253__func__, fail);5254freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);5255/*5256* If freeframe=0 then bf is no longer ours; don't5257* touch it.5258*/5259if (freeframe) {5260/* Remove from BAW */5261if (bf->bf_state.bfs_addedbaw)5262drops++;5263if (bf->bf_state.bfs_dobaw) {5264ath_tx_update_baw(sc, an, atid, bf);5265if (!bf->bf_state.bfs_addedbaw)5266DPRINTF(sc, ATH_DEBUG_SW_TX,5267"%s: wasn't added: seqno %d\n",5268__func__, SEQNO(bf->bf_state.bfs_seqno));5269}5270bf->bf_state.bfs_dobaw = 0;5271}52725273/*5274* If the frame couldn't be filtered, treat it as a drop and5275* prepare to send a BAR.5276*/5277if (freeframe && drops)5278ath_tx_tid_bar_suspend(sc, atid);52795280/*5281* Send BAR if required5282*/5283if (ath_tx_tid_bar_tx_ready(sc, atid))5284ath_tx_tid_bar_tx(sc, atid);52855286ATH_TX_UNLOCK(sc);5287/*5288* If freeframe is set, then the frame couldn't be5289* cloned and bf is still valid. Just complete/free it.5290*/5291if (freeframe)5292ath_tx_default_comp(sc, bf, fail);52935294return;5295}5296/*5297* Don't bother with the retry check if all frames5298* are being failed (eg during queue deletion.)5299*/5300#if 05301if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {5302#endif5303if (fail == 0 && ts.ts_status != 0) {5304ATH_TX_UNLOCK(sc);5305DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",5306__func__);5307ath_tx_aggr_retry_unaggr(sc, bf);5308return;5309}53105311/* Success? Complete */5312DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",5313__func__, tid, SEQNO(bf->bf_state.bfs_seqno));5314if (bf->bf_state.bfs_dobaw) {5315ath_tx_update_baw(sc, an, atid, bf);5316bf->bf_state.bfs_dobaw = 0;5317if (!bf->bf_state.bfs_addedbaw)5318DPRINTF(sc, ATH_DEBUG_SW_TX,5319"%s: wasn't added: seqno %d\n",5320__func__, SEQNO(bf->bf_state.bfs_seqno));5321}53225323/*5324* If the queue is filtered, re-schedule as required.5325*5326* This is required as there may be a subsequent TX descriptor5327* for this end-node that has CLRDMASK set, so it's quite possible5328* that a filtered frame will be followed by a non-filtered5329* (complete or otherwise) frame.5330*5331* XXX should we do this before we complete the frame?5332*/5333if (atid->isfiltered)5334ath_tx_tid_filt_comp_complete(sc, atid);53355336/*5337* Send BAR if required5338*/5339if (ath_tx_tid_bar_tx_ready(sc, atid))5340ath_tx_tid_bar_tx(sc, atid);53415342ATH_TX_UNLOCK(sc);53435344ath_tx_default_comp(sc, bf, fail);5345/* bf is freed at this point */5346}53475348void5349ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)5350{5351if (bf->bf_state.bfs_aggr)5352ath_tx_aggr_comp_aggr(sc, bf, fail);5353else5354ath_tx_aggr_comp_unaggr(sc, bf, fail);5355}53565357/*5358* Grab the software queue depth that we COULD transmit.5359*5360* This includes checks if it's in the BAW, whether it's a frame5361* that is supposed to be in the BAW. Other checks could be done;5362* but for now let's try and avoid doing the whole of ath_tx_form_aggr()5363* here.5364*/5365static int5366ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an,5367struct ath_tid *tid)5368{5369struct ath_buf *bf;5370struct ieee80211_tx_ampdu *tap;5371int nbytes = 0;53725373ATH_TX_LOCK_ASSERT(sc);53745375tap = ath_tx_get_tx_tid(an, tid->tid);53765377/*5378* Iterate over each buffer and sum the pkt_len.5379* Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't5380* ever queue more than that in a single frame.5381*/5382TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {5383/*5384* TODO: I'm not sure if we're going to hit cases where5385* no frames get sent because the list is empty.5386*/53875388/* Check if it's in the BAW */5389if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,5390SEQNO(bf->bf_state.bfs_seqno)))) {5391break;5392}53935394/* Check if it's even supposed to be in the BAW */5395if (! bf->bf_state.bfs_dobaw) {5396break;5397}53985399nbytes += bf->bf_state.bfs_pktlen;5400if (nbytes >= ATH_AGGR_MAXSIZE)5401break;54025403/*5404* Check if we're likely going to leak a frame5405* as part of a PSPOLL. Break out at this point;5406* we're only going to send a single frame anyway.5407*/5408if (an->an_leak_count) {5409break;5410}5411}54125413return MIN(nbytes, ATH_AGGR_MAXSIZE);5414}54155416/*5417* Schedule some packets from the given node/TID to the hardware.5418*5419* This is the aggregate version.5420*/5421void5422ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,5423struct ath_tid *tid)5424{5425struct ath_buf *bf;5426struct ath_txq *txq = sc->sc_ac2q[tid->ac];5427struct ieee80211_tx_ampdu *tap;5428ATH_AGGR_STATUS status;5429ath_bufhead bf_q;5430int swq_pktbytes;54315432DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);5433ATH_TX_LOCK_ASSERT(sc);54345435/*5436* XXX TODO: If we're called for a queue that we're leaking frames to,5437* ensure we only leak one.5438*/54395440tap = ath_tx_get_tx_tid(an, tid->tid);54415442if (tid->tid == IEEE80211_NONQOS_TID)5443DPRINTF(sc, ATH_DEBUG_SW_TX,5444"%s: called for TID=NONQOS_TID?\n", __func__);54455446for (;;) {5447status = ATH_AGGR_DONE;54485449/*5450* If the upper layer has paused the TID, don't5451* queue any further packets.5452*5453* This can also occur from the completion task because5454* of packet loss; but as its serialised with this code,5455* it won't "appear" half way through queuing packets.5456*/5457if (! ath_tx_tid_can_tx_or_sched(sc, tid))5458break;54595460bf = ATH_TID_FIRST(tid);5461if (bf == NULL) {5462break;5463}54645465/*5466* If the packet doesn't fall within the BAW (eg a NULL5467* data frame), schedule it directly; continue.5468*/5469if (! bf->bf_state.bfs_dobaw) {5470DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5471"%s: non-baw packet\n",5472__func__);5473ATH_TID_REMOVE(tid, bf, bf_list);54745475if (bf->bf_state.bfs_nframes > 1)5476DPRINTF(sc, ATH_DEBUG_SW_TX,5477"%s: aggr=%d, nframes=%d\n",5478__func__,5479bf->bf_state.bfs_aggr,5480bf->bf_state.bfs_nframes);54815482/*5483* This shouldn't happen - such frames shouldn't5484* ever have been queued as an aggregate in the5485* first place. However, make sure the fields5486* are correctly setup just to be totally sure.5487*/5488bf->bf_state.bfs_aggr = 0;5489bf->bf_state.bfs_nframes = 1;54905491/* Update CLRDMASK just before this frame is queued */5492ath_tx_update_clrdmask(sc, tid, bf);54935494ath_tx_do_ratelookup(sc, bf, tid->tid,5495bf->bf_state.bfs_pktlen, false);5496ath_tx_calc_duration(sc, bf);5497ath_tx_calc_protection(sc, bf);5498ath_tx_set_rtscts(sc, bf);5499ath_tx_rate_fill_rcflags(sc, bf);5500ath_tx_setds(sc, bf);5501ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);55025503sc->sc_aggr_stats.aggr_nonbaw_pkt++;55045505/* Queue the packet; continue */5506goto queuepkt;5507}55085509TAILQ_INIT(&bf_q);55105511/*5512* Loop over the swq to find out how long5513* each packet is (up until 64k) and provide that5514* to the rate control lookup.5515*/5516swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid);5517ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);55185519/*5520* Note this only is used for the fragment paths and5521* should really be rethought out if we want to do5522* things like an RTS burst across >1 aggregate.5523*/5524ath_tx_calc_duration(sc, bf);5525ath_tx_calc_protection(sc, bf);55265527ath_tx_set_rtscts(sc, bf);5528ath_tx_rate_fill_rcflags(sc, bf);55295530status = ath_tx_form_aggr(sc, an, tid, &bf_q);55315532DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5533"%s: ath_tx_form_aggr() status=%d\n", __func__, status);55345535/*5536* No frames to be picked up - out of BAW5537*/5538if (TAILQ_EMPTY(&bf_q))5539break;55405541/*5542* This assumes that the descriptor list in the ath_bufhead5543* are already linked together via bf_next pointers.5544*/5545bf = TAILQ_FIRST(&bf_q);55465547if (status == ATH_AGGR_8K_LIMITED)5548sc->sc_aggr_stats.aggr_rts_aggr_limited++;55495550/*5551* If it's the only frame send as non-aggregate5552* assume that ath_tx_form_aggr() has checked5553* whether it's in the BAW and added it appropriately.5554*/5555if (bf->bf_state.bfs_nframes == 1) {5556DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5557"%s: single-frame aggregate\n", __func__);55585559/* Update CLRDMASK just before this frame is queued */5560ath_tx_update_clrdmask(sc, tid, bf);55615562bf->bf_state.bfs_aggr = 0;5563bf->bf_state.bfs_ndelim = 0;5564ath_tx_setds(sc, bf);5565ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);5566if (status == ATH_AGGR_BAW_CLOSED)5567sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;5568else5569sc->sc_aggr_stats.aggr_single_pkt++;5570} else {5571DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,5572"%s: multi-frame aggregate: %d frames, "5573"length %d\n",5574__func__, bf->bf_state.bfs_nframes,5575bf->bf_state.bfs_al);5576bf->bf_state.bfs_aggr = 1;5577sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;5578sc->sc_aggr_stats.aggr_aggr_pkt++;55795580/* Update CLRDMASK just before this frame is queued */5581ath_tx_update_clrdmask(sc, tid, bf);55825583/*5584* Calculate the duration/protection as required.5585*/5586ath_tx_calc_duration(sc, bf);5587ath_tx_calc_protection(sc, bf);55885589/*5590* Update the rate and rtscts information based on the5591* rate decision made by the rate control code;5592* the first frame in the aggregate needs it.5593*/5594ath_tx_set_rtscts(sc, bf);55955596/*5597* Setup the relevant descriptor fields5598* for aggregation. The first descriptor5599* already points to the rest in the chain.5600*/5601ath_tx_setds_11n(sc, bf);5602}5603queuepkt:5604/* Set completion handler, multi-frame aggregate or not */5605bf->bf_comp = ath_tx_aggr_comp;56065607if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)5608DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);56095610/*5611* Update leak count and frame config if were leaking frames.5612*5613* XXX TODO: it should update all frames in an aggregate5614* correctly!5615*/5616ath_tx_leak_count_update(sc, tid, bf);56175618/* Punt to txq */5619ath_tx_handoff(sc, txq, bf);56205621/* Track outstanding buffer count to hardware */5622/* aggregates are "one" buffer */5623tid->hwq_depth++;56245625/*5626* Break out if ath_tx_form_aggr() indicated5627* there can't be any further progress (eg BAW is full.)5628* Checking for an empty txq is done above.5629*5630* XXX locking on txq here?5631*/5632/* XXX TXQ locking */5633if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||5634(status == ATH_AGGR_BAW_CLOSED ||5635status == ATH_AGGR_LEAK_CLOSED))5636break;5637}5638}56395640/*5641* Schedule some packets from the given node/TID to the hardware.5642*5643* XXX TODO: this routine doesn't enforce the maximum TXQ depth.5644* It just dumps frames into the TXQ. We should limit how deep5645* the transmit queue can grow for frames dispatched to the given5646* TXQ.5647*5648* To avoid locking issues, either we need to own the TXQ lock5649* at this point, or we need to pass in the maximum frame count5650* from the caller.5651*/5652void5653ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,5654struct ath_tid *tid)5655{5656struct ath_buf *bf;5657struct ath_txq *txq = sc->sc_ac2q[tid->ac];56585659DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",5660__func__, an, tid->tid);56615662ATH_TX_LOCK_ASSERT(sc);56635664/* Check - is AMPDU pending or running? then print out something */5665if (ath_tx_ampdu_pending(sc, an, tid->tid))5666DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",5667__func__, tid->tid);5668if (ath_tx_ampdu_running(sc, an, tid->tid))5669DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",5670__func__, tid->tid);56715672for (;;) {5673/*5674* If the upper layers have paused the TID, don't5675* queue any further packets.5676*5677* XXX if we are leaking frames, make sure we decrement5678* that counter _and_ we continue here.5679*/5680if (! ath_tx_tid_can_tx_or_sched(sc, tid))5681break;56825683bf = ATH_TID_FIRST(tid);5684if (bf == NULL) {5685break;5686}56875688ATH_TID_REMOVE(tid, bf, bf_list);56895690/* Sanity check! */5691if (tid->tid != bf->bf_state.bfs_tid) {5692DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="5693" tid %d\n", __func__, bf->bf_state.bfs_tid,5694tid->tid);5695}5696/* Normal completion handler */5697bf->bf_comp = ath_tx_normal_comp;56985699/*5700* Override this for now, until the non-aggregate5701* completion handler correctly handles software retransmits.5702*/5703bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;57045705/* Update CLRDMASK just before this frame is queued */5706ath_tx_update_clrdmask(sc, tid, bf);57075708/* Program descriptors + rate control */5709ath_tx_do_ratelookup(sc, bf, tid->tid,5710bf->bf_state.bfs_pktlen, false);5711ath_tx_calc_duration(sc, bf);5712ath_tx_calc_protection(sc, bf);5713ath_tx_set_rtscts(sc, bf);5714ath_tx_rate_fill_rcflags(sc, bf);5715ath_tx_setds(sc, bf);57165717/*5718* Update the current leak count if5719* we're leaking frames; and set the5720* MORE flag as appropriate.5721*/5722ath_tx_leak_count_update(sc, tid, bf);57235724/* Track outstanding buffer count to hardware */5725/* aggregates are "one" buffer */5726tid->hwq_depth++;57275728/* Punt to hardware or software txq */5729ath_tx_handoff(sc, txq, bf);5730}5731}57325733/*5734* Schedule some packets to the given hardware queue.5735*5736* This function walks the list of TIDs (ie, ath_node TIDs5737* with queued traffic) and attempts to schedule traffic5738* from them.5739*5740* TID scheduling is implemented as a FIFO, with TIDs being5741* added to the end of the queue after some frames have been5742* scheduled.5743*/5744void5745ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)5746{5747struct ath_tid *tid, *next, *last;57485749ATH_TX_LOCK_ASSERT(sc);57505751/*5752* For non-EDMA chips, aggr frames that have been built are5753* in axq_aggr_depth, whether they've been scheduled or not.5754* There's no FIFO, so txq->axq_depth is what's been scheduled5755* to the hardware.5756*5757* For EDMA chips, we do it in two stages. The existing code5758* builds a list of frames to go to the hardware and the EDMA5759* code turns it into a single entry to push into the FIFO.5760* That way we don't take up one packet per FIFO slot.5761* We do push one aggregate per FIFO slot though, just to keep5762* things simple.5763*5764* The FIFO depth is what's in the hardware; the txq->axq_depth5765* is what's been scheduled to the FIFO.5766*5767* fifo.axq_depth is the number of frames (or aggregates) pushed5768* into the EDMA FIFO. For multi-frame lists, this is the number5769* of frames pushed in.5770* axq_fifo_depth is the number of FIFO slots currently busy.5771*/57725773/* For EDMA and non-EDMA, check built/scheduled against aggr limit */5774if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {5775sc->sc_aggr_stats.aggr_sched_nopkt++;5776return;5777}57785779/*5780* For non-EDMA chips, axq_depth is the "what's scheduled to5781* the hardware list". For EDMA it's "What's built for the hardware"5782* and fifo.axq_depth is how many frames have been dispatched5783* already to the hardware.5784*/5785if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {5786sc->sc_aggr_stats.aggr_sched_nopkt++;5787return;5788}57895790last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);57915792TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {5793/*5794* Suspend paused queues here; they'll be resumed5795* once the addba completes or times out.5796*/5797DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",5798__func__, tid->tid, tid->paused);5799ath_tx_tid_unsched(sc, tid);5800/*5801* This node may be in power-save and we're leaking5802* a frame; be careful.5803*/5804if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {5805goto loop_done;5806}5807if (ath_tx_ampdu_running(sc, tid->an, tid->tid))5808ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);5809else5810ath_tx_tid_hw_queue_norm(sc, tid->an, tid);58115812/* Not empty? Re-schedule */5813if (tid->axq_depth != 0)5814ath_tx_tid_sched(sc, tid);58155816/*5817* Give the software queue time to aggregate more5818* packets. If we aren't running aggregation then5819* we should still limit the hardware queue depth.5820*/5821/* XXX TXQ locking */5822if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {5823break;5824}5825if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {5826break;5827}5828loop_done:5829/*5830* If this was the last entry on the original list, stop.5831* Otherwise nodes that have been rescheduled onto the end5832* of the TID FIFO list will just keep being rescheduled.5833*5834* XXX What should we do about nodes that were paused5835* but are pending a leaking frame in response to a ps-poll?5836* They'll be put at the front of the list; so they'll5837* prematurely trigger this condition! Ew.5838*/5839if (tid == last)5840break;5841}5842}58435844/*5845* TX addba handling5846*/58475848/*5849* Return net80211 TID struct pointer, or NULL for none5850*/5851struct ieee80211_tx_ampdu *5852ath_tx_get_tx_tid(struct ath_node *an, int tid)5853{5854struct ieee80211_node *ni = &an->an_node;5855struct ieee80211_tx_ampdu *tap;58565857if (tid == IEEE80211_NONQOS_TID)5858return NULL;58595860tap = &ni->ni_tx_ampdu[tid];5861return tap;5862}58635864/*5865* Is AMPDU-TX running?5866*/5867static int5868ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)5869{5870struct ieee80211_tx_ampdu *tap;58715872if (tid == IEEE80211_NONQOS_TID)5873return 0;58745875tap = ath_tx_get_tx_tid(an, tid);5876if (tap == NULL)5877return 0; /* Not valid; default to not running */58785879return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);5880}58815882/*5883* Is AMPDU-TX negotiation pending?5884*/5885static int5886ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)5887{5888struct ieee80211_tx_ampdu *tap;58895890if (tid == IEEE80211_NONQOS_TID)5891return 0;58925893tap = ath_tx_get_tx_tid(an, tid);5894if (tap == NULL)5895return 0; /* Not valid; default to not pending */58965897return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);5898}58995900/*5901* Is AMPDU-TX pending for the given TID?5902*/59035904/*5905* Method to handle sending an ADDBA request.5906*5907* We tap this so the relevant flags can be set to pause the TID5908* whilst waiting for the response.5909*5910* XXX there's no timeout handler we can override?5911*/5912int5913ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,5914int dialogtoken, int baparamset, int batimeout)5915{5916struct ath_softc *sc = ni->ni_ic->ic_softc;5917int tid = tap->txa_tid;5918struct ath_node *an = ATH_NODE(ni);5919struct ath_tid *atid = &an->an_tid[tid];59205921/*5922* XXX danger Will Robinson!5923*5924* Although the taskqueue may be running and scheduling some more5925* packets, these should all be _before_ the addba sequence number.5926* However, net80211 will keep self-assigning sequence numbers5927* until addba has been negotiated.5928*5929* In the past, these packets would be "paused" (which still works5930* fine, as they're being scheduled to the driver in the same5931* serialised method which is calling the addba request routine)5932* and when the aggregation session begins, they'll be dequeued5933* as aggregate packets and added to the BAW. However, now there's5934* a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these5935* packets. Thus they never get included in the BAW tracking and5936* this can cause the initial burst of packets after the addba5937* negotiation to "hang", as they quickly fall outside the BAW.5938*5939* The "eventual" solution should be to tag these packets with5940* dobaw. Although net80211 has given us a sequence number,5941* it'll be "after" the left edge of the BAW and thus it'll5942* fall within it.5943*/5944ATH_TX_LOCK(sc);5945/*5946* This is a bit annoying. Until net80211 HT code inherits some5947* (any) locking, we may have this called in parallel BUT only5948* one response/timeout will be called. Grr.5949*/5950if (atid->addba_tx_pending == 0) {5951ath_tx_tid_pause(sc, atid);5952atid->addba_tx_pending = 1;5953}5954ATH_TX_UNLOCK(sc);59555956DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,5957"%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",5958__func__,5959ni->ni_macaddr,5960":",5961dialogtoken, baparamset, batimeout);5962DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,5963"%s: txa_start=%d, ni_txseqs=%d\n",5964__func__, tap->txa_start, ni->ni_txseqs[tid]);59655966return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,5967batimeout);5968}59695970/*5971* Handle an ADDBA response.5972*5973* We unpause the queue so TX'ing can resume.5974*5975* Any packets TX'ed from this point should be "aggregate" (whether5976* aggregate or not) so the BAW is updated.5977*5978* Note! net80211 keeps self-assigning sequence numbers until5979* ampdu is negotiated. This means the initially-negotiated BAW left5980* edge won't match the ni->ni_txseq.5981*5982* So, being very dirty, the BAW left edge is "slid" here to match5983* ni->ni_txseq.5984*5985* What likely SHOULD happen is that all packets subsequent to the5986* addba request should be tagged as aggregate and queued as non-aggregate5987* frames; thus updating the BAW. For now though, I'll just slide the5988* window.5989*/5990int5991ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,5992int status, int code, int batimeout)5993{5994struct ath_softc *sc = ni->ni_ic->ic_softc;5995int tid = tap->txa_tid;5996struct ath_node *an = ATH_NODE(ni);5997struct ath_tid *atid = &an->an_tid[tid];5998int r;59996000DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,6001"%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,6002ni->ni_macaddr,6003":",6004status, code, batimeout);60056006DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,6007"%s: txa_start=%d, ni_txseqs=%d\n",6008__func__, tap->txa_start, ni->ni_txseqs[tid]);60096010/*6011* Call this first, so the interface flags get updated6012* before the TID is unpaused. Otherwise a race condition6013* exists where the unpaused TID still doesn't yet have6014* IEEE80211_AGGR_RUNNING set.6015*/6016r = sc->sc_addba_response(ni, tap, status, code, batimeout);60176018ATH_TX_LOCK(sc);6019atid->addba_tx_pending = 0;6020/*6021* XXX dirty!6022* Slide the BAW left edge to wherever net80211 left it for us.6023* Read above for more information.6024*/6025tap->txa_start = ni->ni_txseqs[tid];6026ath_tx_tid_resume(sc, atid);6027ATH_TX_UNLOCK(sc);6028return r;6029}60306031/*6032* Stop ADDBA on a queue.6033*6034* This can be called whilst BAR TX is currently active on the queue,6035* so make sure this is unblocked before continuing.6036*/6037void6038ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)6039{6040struct ath_softc *sc = ni->ni_ic->ic_softc;6041int tid = tap->txa_tid;6042struct ath_node *an = ATH_NODE(ni);6043struct ath_tid *atid = &an->an_tid[tid];6044ath_bufhead bf_cq;6045struct ath_buf *bf;60466047DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",6048__func__,6049ni->ni_macaddr,6050":");60516052/*6053* Pause TID traffic early, so there aren't any races6054* Unblock the pending BAR held traffic, if it's currently paused.6055*/6056ATH_TX_LOCK(sc);6057ath_tx_tid_pause(sc, atid);6058if (atid->bar_wait) {6059/*6060* bar_unsuspend() expects bar_tx == 1, as it should be6061* called from the TX completion path. This quietens6062* the warning. It's cleared for us anyway.6063*/6064atid->bar_tx = 1;6065ath_tx_tid_bar_unsuspend(sc, atid);6066}6067ATH_TX_UNLOCK(sc);60686069/* There's no need to hold the TXQ lock here */6070sc->sc_addba_stop(ni, tap);60716072/*6073* ath_tx_tid_cleanup will resume the TID if possible, otherwise6074* it'll set the cleanup flag, and it'll be unpaused once6075* things have been cleaned up.6076*/6077TAILQ_INIT(&bf_cq);6078ATH_TX_LOCK(sc);60796080/*6081* In case there's a followup call to this, only call it6082* if we don't have a cleanup in progress.6083*6084* Since we've paused the queue above, we need to make6085* sure we unpause if there's already a cleanup in6086* progress - it means something else is also doing6087* this stuff, so we don't need to also keep it paused.6088*/6089if (atid->cleanup_inprogress) {6090ath_tx_tid_resume(sc, atid);6091} else {6092ath_tx_tid_cleanup(sc, an, tid, &bf_cq);6093/*6094* Unpause the TID if no cleanup is required.6095*/6096if (! atid->cleanup_inprogress)6097ath_tx_tid_resume(sc, atid);6098}6099ATH_TX_UNLOCK(sc);61006101/* Handle completing frames and fail them */6102while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {6103TAILQ_REMOVE(&bf_cq, bf, bf_list);6104ath_tx_default_comp(sc, bf, 1);6105}61066107}61086109/*6110* Handle a node reassociation.6111*6112* We may have a bunch of frames queued to the hardware; those need6113* to be marked as cleanup.6114*/6115void6116ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)6117{6118struct ath_tid *tid;6119int i;6120ath_bufhead bf_cq;6121struct ath_buf *bf;61226123TAILQ_INIT(&bf_cq);61246125ATH_TX_UNLOCK_ASSERT(sc);61266127ATH_TX_LOCK(sc);6128for (i = 0; i < IEEE80211_TID_SIZE; i++) {6129tid = &an->an_tid[i];6130if (tid->hwq_depth == 0)6131continue;6132DPRINTF(sc, ATH_DEBUG_NODE,6133"%s: %6D: TID %d: cleaning up TID\n",6134__func__,6135an->an_node.ni_macaddr,6136":",6137i);6138/*6139* In case there's a followup call to this, only call it6140* if we don't have a cleanup in progress.6141*/6142if (! tid->cleanup_inprogress) {6143ath_tx_tid_pause(sc, tid);6144ath_tx_tid_cleanup(sc, an, i, &bf_cq);6145/*6146* Unpause the TID if no cleanup is required.6147*/6148if (! tid->cleanup_inprogress)6149ath_tx_tid_resume(sc, tid);6150}6151}6152ATH_TX_UNLOCK(sc);61536154/* Handle completing frames and fail them */6155while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {6156TAILQ_REMOVE(&bf_cq, bf, bf_list);6157ath_tx_default_comp(sc, bf, 1);6158}6159}61606161/*6162* Note: net80211 bar_timeout() doesn't call this function on BAR failure;6163* it simply tears down the aggregation session. Ew.6164*6165* It however will call ieee80211_ampdu_stop() which will call6166* ic->ic_addba_stop().6167*6168* XXX This uses a hard-coded max BAR count value; the whole6169* XXX BAR TX success or failure should be better handled!6170*/6171void6172ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,6173int status)6174{6175struct ath_softc *sc = ni->ni_ic->ic_softc;6176int tid = tap->txa_tid;6177struct ath_node *an = ATH_NODE(ni);6178struct ath_tid *atid = &an->an_tid[tid];6179int attempts = tap->txa_attempts;6180int old_txa_start;61816182DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,6183"%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",6184__func__,6185ni->ni_macaddr,6186":",6187tap->txa_tid,6188atid->tid,6189status,6190attempts,6191tap->txa_start,6192tap->txa_seqpending);61936194/* Note: This may update the BAW details */6195/*6196* XXX What if this does slide the BAW along? We need to somehow6197* XXX either fix things when it does happen, or prevent the6198* XXX seqpending value to be anything other than exactly what6199* XXX the hell we want!6200*6201* XXX So for now, how I do this inside the TX lock for now6202* XXX and just correct it afterwards? The below condition should6203* XXX never happen and if it does I need to fix all kinds of things.6204*/6205ATH_TX_LOCK(sc);6206old_txa_start = tap->txa_start;6207sc->sc_bar_response(ni, tap, status);6208if (tap->txa_start != old_txa_start) {6209device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",6210__func__,6211tid,6212tap->txa_start,6213old_txa_start);6214}6215tap->txa_start = old_txa_start;6216ATH_TX_UNLOCK(sc);62176218/* Unpause the TID */6219/*6220* XXX if this is attempt=50, the TID will be downgraded6221* XXX to a non-aggregate session. So we must unpause the6222* XXX TID here or it'll never be done.6223*6224* Also, don't call it if bar_tx/bar_wait are 0; something6225* has beaten us to the punch? (XXX figure out what?)6226*/6227if (status == 0 || attempts == 50) {6228ATH_TX_LOCK(sc);6229if (atid->bar_tx == 0 || atid->bar_wait == 0)6230DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,6231"%s: huh? bar_tx=%d, bar_wait=%d\n",6232__func__,6233atid->bar_tx, atid->bar_wait);6234else6235ath_tx_tid_bar_unsuspend(sc, atid);6236ATH_TX_UNLOCK(sc);6237}6238}62396240/*6241* This is called whenever the pending ADDBA request times out.6242* Unpause and reschedule the TID.6243*/6244void6245ath_addba_response_timeout(struct ieee80211_node *ni,6246struct ieee80211_tx_ampdu *tap)6247{6248struct ath_softc *sc = ni->ni_ic->ic_softc;6249int tid = tap->txa_tid;6250struct ath_node *an = ATH_NODE(ni);6251struct ath_tid *atid = &an->an_tid[tid];62526253DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,6254"%s: %6D: TID=%d, called; resuming\n",6255__func__,6256ni->ni_macaddr,6257":",6258tid);62596260ATH_TX_LOCK(sc);6261atid->addba_tx_pending = 0;6262ATH_TX_UNLOCK(sc);62636264/* Note: This updates the aggregate state to (again) pending */6265sc->sc_addba_response_timeout(ni, tap);62666267/* Unpause the TID; which reschedules it */6268ATH_TX_LOCK(sc);6269ath_tx_tid_resume(sc, atid);6270ATH_TX_UNLOCK(sc);6271}62726273/*6274* Check if a node is asleep or not.6275*/6276int6277ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)6278{62796280ATH_TX_LOCK_ASSERT(sc);62816282return (an->an_is_powersave);6283}62846285/*6286* Mark a node as currently "in powersaving."6287* This suspends all traffic on the node.6288*6289* This must be called with the node/tx locks free.6290*6291* XXX TODO: the locking silliness below is due to how the node6292* locking currently works. Right now, the node lock is grabbed6293* to do rate control lookups and these are done with the TX6294* queue lock held. This means the node lock can't be grabbed6295* first here or a LOR will occur.6296*6297* Eventually (hopefully!) the TX path code will only grab6298* the TXQ lock when transmitting and the ath_node lock when6299* doing node/TID operations. There are other complications -6300* the sched/unsched operations involve walking the per-txq6301* 'active tid' list and this requires both locks to be held.6302*/6303void6304ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)6305{6306struct ath_tid *atid;6307struct ath_txq *txq;6308int tid;63096310ATH_TX_UNLOCK_ASSERT(sc);63116312/* Suspend all traffic on the node */6313ATH_TX_LOCK(sc);63146315if (an->an_is_powersave) {6316DPRINTF(sc, ATH_DEBUG_XMIT,6317"%s: %6D: node was already asleep!\n",6318__func__, an->an_node.ni_macaddr, ":");6319ATH_TX_UNLOCK(sc);6320return;6321}63226323for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {6324atid = &an->an_tid[tid];6325txq = sc->sc_ac2q[atid->ac];63266327ath_tx_tid_pause(sc, atid);6328}63296330/* Mark node as in powersaving */6331an->an_is_powersave = 1;63326333ATH_TX_UNLOCK(sc);6334}63356336/*6337* Mark a node as currently "awake."6338* This resumes all traffic to the node.6339*/6340void6341ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)6342{6343struct ath_tid *atid;6344struct ath_txq *txq;6345int tid;63466347ATH_TX_UNLOCK_ASSERT(sc);63486349ATH_TX_LOCK(sc);63506351/* !? */6352if (an->an_is_powersave == 0) {6353ATH_TX_UNLOCK(sc);6354DPRINTF(sc, ATH_DEBUG_XMIT,6355"%s: an=%p: node was already awake\n",6356__func__, an);6357return;6358}63596360/* Mark node as awake */6361an->an_is_powersave = 0;6362/*6363* Clear any pending leaked frame requests6364*/6365an->an_leak_count = 0;63666367for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {6368atid = &an->an_tid[tid];6369txq = sc->sc_ac2q[atid->ac];63706371ath_tx_tid_resume(sc, atid);6372}6373ATH_TX_UNLOCK(sc);6374}63756376static int6377ath_legacy_dma_txsetup(struct ath_softc *sc)6378{63796380/* nothing new needed */6381return (0);6382}63836384static int6385ath_legacy_dma_txteardown(struct ath_softc *sc)6386{63876388/* nothing new needed */6389return (0);6390}63916392void6393ath_xmit_setup_legacy(struct ath_softc *sc)6394{6395/*6396* For now, just set the descriptor length to sizeof(ath_desc);6397* worry about extracting the real length out of the HAL later.6398*/6399sc->sc_tx_desclen = sizeof(struct ath_desc);6400sc->sc_tx_statuslen = sizeof(struct ath_desc);6401sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */64026403sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;6404sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;6405sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;64066407sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;6408sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;64096410sc->sc_tx.xmit_drain = ath_legacy_tx_drain;6411}641264136414