Path: blob/main/sys/contrib/dev/athk/ath10k/htt_rx.c
101206 views
// SPDX-License-Identifier: ISC1/*2* Copyright (c) 2005-2011 Atheros Communications Inc.3* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.4* Copyright (c) 2018, The Linux Foundation. All rights reserved.5* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.6* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.7*/89#include <linux/export.h>1011#include "core.h"12#include "htc.h"13#include "htt.h"14#include "txrx.h"15#include "debug.h"16#include "trace.h"17#include "mac.h"1819#include <linux/log2.h>20#include <linux/bitfield.h>2122/* when under memory pressure rx ring refill may fail and needs a retry */23#define HTT_RX_RING_REFILL_RETRY_MS 502425#define HTT_RX_RING_REFILL_RESCHED_MS 52627/* shortcut to interpret a raw memory buffer as a rx descriptor */28#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)2930static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);3132static struct sk_buff *33ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)34{35struct ath10k_skb_rxcb *rxcb;3637hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)38if (rxcb->paddr == paddr)39return ATH10K_RXCB_SKB(rxcb);4041WARN_ON_ONCE(1);42return NULL;43}4445static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)46{47struct sk_buff *skb;48struct ath10k_skb_rxcb *rxcb;49struct hlist_node *n;50int i;5152if (htt->rx_ring.in_ord_rx) {53hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {54skb = ATH10K_RXCB_SKB(rxcb);55dma_unmap_single(htt->ar->dev, rxcb->paddr,56skb->len + skb_tailroom(skb),57DMA_FROM_DEVICE);58hash_del(&rxcb->hlist);59dev_kfree_skb_any(skb);60}61} else {62for (i = 0; i < htt->rx_ring.size; i++) {63skb = htt->rx_ring.netbufs_ring[i];64if (!skb)65continue;6667rxcb = ATH10K_SKB_RXCB(skb);68dma_unmap_single(htt->ar->dev, rxcb->paddr,69skb->len + skb_tailroom(skb),70DMA_FROM_DEVICE);71dev_kfree_skb_any(skb);72}73}7475htt->rx_ring.fill_cnt = 
0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

/* Size helpers for the 32-bit vs 64-bit paddr ring variants.
 * NOTE(review): sizeof() is applied to the member as declared in
 * struct ath10k_htt — verify against the struct that this yields the
 * per-entry size and not the pointer size.
 */
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

/* Attach the coherent ring memory to the 32-bit paddr ring view. */
static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

/* Attach the coherent ring memory to the 64-bit paddr ring view. */
static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

/* Publish a buffer's DMA address at ring slot @idx (little-endian). */
static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

/* Clear ring slot @idx after the buffer has been popped. */
static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

/* Allocate, DMA-map and post up to @num rx buffers starting at the
 * firmware-visible alloc index. Caller must hold rx_ring.lock (see the
 * ath10k_htt_rx_ring_fill_n wrapper). Returns 0 or -ENOMEM; on failure
 * whatever was filled so far is still published to the firmware.
 */
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. 
This147* guarantees there'll be no replenishment overruns possible.148*/149BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);150151idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);152153if (idx < 0 || idx >= htt->rx_ring.size) {154ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");155idx &= htt->rx_ring.size_mask;156ret = -ENOMEM;157goto fail;158}159160while (num > 0) {161skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);162if (!skb) {163ret = -ENOMEM;164goto fail;165}166167if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))168skb_pull(skb,169PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -170skb->data);171172/* Clear rx_desc attention word before posting to Rx ring */173rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);174ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);175176paddr = dma_map_single(htt->ar->dev, skb->data,177skb->len + skb_tailroom(skb),178DMA_FROM_DEVICE);179180if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {181dev_kfree_skb_any(skb);182ret = -ENOMEM;183goto fail;184}185186rxcb = ATH10K_SKB_RXCB(skb);187rxcb->paddr = paddr;188htt->rx_ring.netbufs_ring[idx] = skb;189ath10k_htt_set_paddrs_ring(htt, paddr, idx);190htt->rx_ring.fill_cnt++;191192if (htt->rx_ring.in_ord_rx) {193hash_add(htt->rx_ring.skb_table,194&ATH10K_SKB_RXCB(skb)->hlist,195paddr);196}197198num--;199idx++;200idx &= htt->rx_ring.size_mask;201}202203fail:204/*205* Make sure the rx buffer is updated before available buffer206* index to avoid any potential rx ring corruption.207*/208mb();209*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);210return ret;211}212213static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)214{215lockdep_assert_held(&htt->rx_ring.lock);216return __ath10k_htt_rx_ring_fill_n(htt, num);217}218219static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)220{221int ret, num_deficit, num_to_fill;222223/* Refilling the whole RX ring buffer proves to be a bad idea. 
The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once CPU the host system was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turns makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	/* Cap the work done in one pass; leftovers are rescheduled below. */
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		/* More buffers still owed: resched sooner than the OOM retry. */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

/* Timer callback bouncing back into the replenish path. */
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = timer_container_of(htt, t,
						    rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

/* One-shot full refill used at start-up; frees the ring on failure so the
 * caller does not inherit a half-populated ring.
 */
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	/* High-latency (HL) devices do not use the host rx ring. */
	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = 
ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

/* Tear down all htt rx state: stop the retry timer, drain the pending
 * queues, free posted ring buffers and release the coherent ring/index
 * DMA memory allocated in ath10k_htt_rx_alloc().
 */
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	/* Must be stopped before freeing what the timer would touch. */
	timer_delete_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	ath10k_htt_config_paddrs_ring(htt, NULL);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
	htt->rx_ring.alloc_idx.vaddr = NULL;

	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
}

/* Pop the next buffer at the software read index (legacy, in-order ring
 * consumption), unmap it and hand ownership to the caller. Returns NULL
 * if the ring is unexpectedly empty. Caller holds rx_ring.lock.
 */
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu 
*/
/* Pop one A-MSDU's worth of buffers off the rx ring into @amsdu, walking
 * the hardware rx descriptors until the LAST_MSDU flag. Chained (ring2)
 * continuation buffers carry payload only, no rx descriptor.
 */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;
	struct rx_attention *rx_desc_attention;
	struct rx_frag_info_common *rx_desc_frag_info_common;
	struct rx_msdu_start_common *rx_desc_msdu_start_common;
	struct rx_msdu_end_common *rx_desc_msdu_end_common;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			/* Ring underrun mid-A-MSDU: drop the partial frame. */
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
		rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
		rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
									      rx_desc);
		rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
		rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
		skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc_attention->flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = 
MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc_frag_info_common->ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		/* First buffer holds at most one rx-buffer's worth; the rest
		 * of msdu_len is consumed by the chained buffers below.
		 */
		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		/* FIXME: why are we skipping the first part of the rx_desc? */
#if defined(__linux__)
		trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
#elif defined(__FreeBSD__)
		trace_ath10k_htt_rx_desc(ar, (u8 *)rx_desc + sizeof(u32),
#endif
					 hw->rx_desc_ops->rx_desc_size - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). 
Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

/* In-order rx: pop a buffer by its DMA address (hash lookup), unmap it
 * and hand ownership to the caller. Caller holds rx_ring.lock.
 */
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* Attach @frag_list as the skb frag_list of @skb_head and account
 * @frag_len into the head skb's length totals.
 */
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

/* Monitor-mode A-MSDU reassembly for 32-bit in-order descriptors: walk
 * the msdu descriptor array, popping continuation fragments by paddr and
 * chaining them onto @msdu until a descriptor with the last-fragment
 * marker (carried in ->reserved) is seen. On success *msdu_desc is
 * advanced to the last descriptor consumed.
 */
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	/* Strip the rx descriptor, then expose the first chunk of payload. */
	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			/* Terminate the partial chain before bailing out. */
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

/* 64-bit paddr variant of the monitor-mode A-MSDU reassembly above. */
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
	trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

	skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
	skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if 
(amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
#if defined(__linux__)
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
#elif defined(__FreeBSD__)
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%jx", (uintmax_t)paddr);
#endif
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
#if defined(__linux__)
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
#elif defined(__FreeBSD__)
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%jx",
				    (uintmax_t)paddr);
#endif
			/* Terminate the partial chain before bailing out. */
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

/* Pop all msdus referenced by a 32-bit in-order indication event into
 * @list. For non-offload frames the rx descriptor is stripped and the
 * MSDU_DONE attention bit verified. Caller holds rx_ring.lock.
 */
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		/* Monitor interface active: reassemble chained A-MSDUs. */
		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

/* 64-bit paddr variant of ath10k_htt_rx_pop_paddr32_list(). */
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, 
msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
			rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

			trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);

			skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd_attention->flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

/* Allocate and initialize all htt rx state: the netbufs shadow array,
 * the coherent paddr ring, the firmware-shared alloc index, the refill
 * retry timer and the rx work queues. Counterpart of ath10k_htt_rx_free().
 * Returns 0 on success, -EINVAL for a bad ring size, -ENOMEM otherwise.
 */
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	/* High-latency devices do not use the host rx ring at all. */
	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	/* size_mask-based index wrap only works for power-of-2 sizes. */
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	/* Single __le32 index shared with the firmware. */
	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);
#if defined(__FreeBSD__)
	spin_lock_init(&htt->tx_fetch_ind_q.lock);
#endif

	htt->rx_ring.fill_cnt = 0;
	/* NOTE(review): sw_rd_idx was set to size_mask above and is reset to
	 * 0 here, matching the upstream sequence — confirm intent upstream.
	 */
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
	ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
	htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
	return -ENOMEM;
}

/* Length of the per-frame crypto IV/header for the given cipher, as
 * inserted between the 802.11 header and the payload. Unknown/unsupported
 * ciphers are warned about and treated as zero-length.
 */
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

/* Trailing MIC length for the given cipher (0 where the MIC is either
 * absent or not carried in the frame tail handled here).
 */
static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

/* Trailing ICV length for the given cipher (WEP/TKIP only). */
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case 
HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

/* 802.11 A-MSDU subframe header: DA, SA and big-endian length. */
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

/* Map the 2-bit hardware bandwidth code to mac80211's RATE_INFO_BW_*. */
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

/* Decode rate information (legacy/HT/VHT: MCS, NSS, bandwidth, SGI) from
 * the PPDU start fields of the rx descriptor into @status for mac80211.
 */
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct rx_mpdu_end *rxd_mpdu_end;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_ppdu_start *rxd_ppdu_start;
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 *rxd_msdu_payload;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);

	info1 = __le32_to_cpu(rxd_ppdu_start->info1);
	info2 = __le32_to_cpu(rxd_ppdu_start->info2);
	info3 = __le32_to_cpu(rxd_ppdu_start->info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd_attention->flags),
				    __le32_to_cpu(rxd_mpdu_start->info0),
				    __le32_to_cpu(rxd_mpdu_start->info1),
				    __le32_to_cpu(rxd_msdu_start_common->info0),
				    __le32_to_cpu(rxd_msdu_start_common->info1),
				    rxd_ppdu_start->info0,
				    __le32_to_cpu(rxd_ppdu_start->info1),
				    __le32_to_cpu(rxd_ppdu_start->info2),
				    __le32_to_cpu(rxd_ppdu_start->info3),
				    __le32_to_cpu(rxd_ppdu_start->info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd_msdu_end_common->info0),
				    __le32_to_cpu(rxd_mpdu_end->info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd_msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

/* Derive the operating channel from the peer the frame came from, using
 * the peer index carried in the rx descriptor. Returns NULL when the
 * descriptor, peer or vif lookup fails. Caller holds ar->data_lock.
 */
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_attention *rxd_attention;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_end_common = 
ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	if (rxd_attention->flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	/* Peer index is only present in the first MSDU of an MPDU. */
	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

/* Resolve the rx channel from the vif matching vdev_id, if any. */
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

/* Channel-context iterator callback: copy out the context's chandef.
 * With multiple contexts the last one iterated wins.
 */
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

/* Resolve the rx channel from any active channel context, if one exists. */
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

/* Fill status->band/freq from the best available channel source, trying
 * sources in decreasing order of specificity (scan channel, rx channel,
 * peer, vdev, any chanctx, target operating channel). Returns false if
 * no channel could be determined.
 */
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch =
ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

/* Derive per-chain and combined signal strength for the rx status from
 * the PPDU start descriptor's RSSI fields.
 */
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
		status->chains &= ~BIT(i);

		/* 0x80 is skipped here — presumably the hardware's
		 * "no sample" sentinel for a chain; TODO confirm.
		 */
		if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
						  rxd_ppdu_start->rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd_ppdu_start->rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

/* Record the (32-bit) PPDU-end TSF timestamp as the frame's mactime. */
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct rx_ppdu_end_common *rxd_ppdu_end_common;

	rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);

	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
 * Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

/* Populate the per-PPDU parts of the rx status (signal, channel, rates,
 * mactime, A-MPDU bookkeeping) for an A-MSDU, keyed off the first/last
 * MPDU attention flags of its first MSDU's descriptor.
 */
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	/* The rx descriptor sits immediately before the skb data. */
	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);

	is_first_ppdu = !!(rxd_attention->flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd_attention->flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

/* TID -> access category name, for debug logging only. */
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

/* Format a frame's TID (and AC name when TID < 8) into out for debug
 * logging. Returns "" for non-QoS data frames.
 */
static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

/* Copy the accumulated rx status into the skb's control block and queue
 * the MSDU for later delivery to mac80211.
 */
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

/* Hand a fully prepared MSDU to mac80211, dropping FCS-failed frames
 * unless the FCS-fail filter flag is set.
 */
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar,
		   ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   "mcast" : "ucast",
		   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

/* 802.11 header length of an nwifi-decapped frame; firmware pads it to a
 * 4-byte boundary unless it advertises the no-4addr-padding feature.
 */
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

/* Undecap a raw (not hw-decapped) MSDU: trim FCS, re-push the original
 * 802.11 header for msdu-limit-error frames, and strip crypto trailers
 * and the IV/params head when the frame was decrypted by hardware.
 */
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when
	 * deaggregate, so that unwanted MSDU-deaggregation is avoided for
	 * error packets. If limit exceeds, hw sends all remaining MSDUs as
	 * a single last MSDU with this msdu limit error set.
	 */
	msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);

	/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
	 * without first MSDU is expected in that case, and handled later here.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 80211 header */
	if (unlikely(msdu_limit_err)) {
#if defined(__linux__)
		hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
		hdr = __DECONST(struct ieee80211_hdr *, first_hdr);
#endif
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
#if defined(__linux__)
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
			       (u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* Slide the 802.11 header over the IV (regions overlap,
		 * hence memmove) and drop the now-duplicated front bytes.
		 */
#if defined(__linux__)
		memmove((void *)msdu->data + crypto_len,
#elif defined(__FreeBSD__)
		memmove((u8 *)msdu->data + crypto_len,
#endif
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Undecap a native-wifi MSDU: replace the 3-addr nwifi header with the
 * original 802.11 header (and crypto param when the IV was not stripped),
 * preserving the per-subframe DA/SA.
 */
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr2;
#endif
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
#if defined(__linux__)
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
#elif defined(__FreeBSD__)
	rxd = HTT_RX_BUF_TO_RX_DESC(hw, (u8 *)msdu->data -
#endif
				    hw->rx_desc_ops->rx_desc_size);

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr2);
	ether_addr_copy(da, ieee80211_get_DA(hdr2));
	ether_addr_copy(sa, ieee80211_get_SA(hdr2));
#endif
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
#if defined(__linux__)
	hdr
= (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr2), da);
	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
#endif
}

/* Locate the rfc1042/llc header inside the rx descriptor's raw header
 * status area, skipping the 802.11 header, crypto param and (for A-MSDU
 * subframes) the A-MSDU subframe header.
 */
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	u8 *rxd_rx_hdr_status;
	size_t hdr_len, crypto_len;
#if defined(__linux__)
	void *rfc1042;
#elif defined(__FreeBSD__)
	u8 *rfc1042;
#endif
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	hdr = (void *)rxd_rx_hdr_status;

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

#if defined(__linux__)
	rfc1042 = hdr;
#elif defined(__FreeBSD__)
	rfc1042 = (void *)hdr;
#endif

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

/* Undecap an ethernet-II decapped MSDU: swap the eth header for the
 * original 802.11 header plus rfc1042/llc, preserving DA/SA.
 */
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr2;
#endif
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr
*)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
#elif defined(__FreeBSD__)
	hdr2 = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr2), da);
	ether_addr_copy(ieee80211_get_SA(hdr2), sa);
#endif
}

/* Undecap an 802.3/SNAP-LLC decapped MSDU: drop the A-MSDU subframe
 * header and re-push the original 802.11 header (plus crypto param when
 * the IV was not stripped), keeping the existing rfc1042/llc in place.
 */
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
#if defined(__linux__)
	struct ieee80211_hdr *hdr;
#elif defined(__FreeBSD__)
	const struct ieee80211_hdr *hdr;
#endif
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

#if defined(__linux__)
	hdr = (struct ieee80211_hdr *)first_hdr;
#elif defined(__FreeBSD__)
	hdr = (const struct ieee80211_hdr *)first_hdr;
#endif
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
#elif defined(__FreeBSD__)
		       (const u8 *)hdr + round_up(hdr_len, bytes_aligned),
#endif
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

/* Dispatch per-MSDU undecap according to the decap format recorded in
 * the rx descriptor.
 */
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..)
 msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

/* Translate the rx descriptor's hardware checksum verdicts into an
 * ip_summed value: CHECKSUM_UNNECESSARY only for TCP/UDP over IPv4/IPv6
 * where both the IP and the TCP/UDP checksums verified OK.
 */
static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)skb->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	flags = __le32_to_cpu(rxd_attention->flags);
	info = __le32_to_cpu(rxd_msdu_start_common->info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

/* Apply the hardware checksum verdict to the MSDU. */
static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
					 struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}

/* Extract the 48-bit CCMP packet number from the frame's extended header.
 * Returns 0 for encryption types other than AES-CCM (WPA2).
 */
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
				  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)skb->data;
	ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);

	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
		/* PN bytes 2/3 of the CCMP header are key-id/reserved and
		 * are skipped (ehdr[2], ehdr[3]).
		 */
		pn = ehdr[0];
		pn |= (u64)ehdr[1] << 8;
		pn |= (u64)ehdr[4] << 16;
		pn |= (u64)ehdr[5] << 24;
		pn |= (u64)ehdr[6] << 32;
		pn |= (u64)ehdr[7] << 40;
	}
	return pn;
}

/* Fragmented frames must not be multicast; returns true (check passed)
 * only when addr1 is a unicast address.
 */
static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;
	return !is_multicast_ether_addr(hdr->addr1);
}

/* Verify the CCMP PN sequence of a fragment: the first fragment resets
 * the per-TID PN/sequence state, later fragments must match the sequence
 * number and carry exactly the next PN. Returns false on any mismatch.
 */
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
					  struct sk_buff *skb,
					  u16 peer_id,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_peer *peer;
	union htt_rx_pn_t *last_pn, new_pn = {};
	struct ieee80211_hdr *hdr;
	u8 tid, frag_number;
	u32 seq;

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
		return false;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = ATH10K_TXRX_NON_QOS_TID;

	last_pn =
&peer->frag_tids_last_pn[tid];
	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));

	if (frag_number == 0) {
		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else {
		if (seq != peer->frag_tids_seq[tid])
			return false;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			return false;

		last_pn->pn48 = new_pn.pn48;
	}

	return true;
}

/* Process one MPDU (an A-MSDU chain): derive encryption/error state from
 * the first and last MSDUs' rx descriptors, set the per-MPDU rx status
 * flags, drop invalid fragments and undecap every remaining MSDU.
 */
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err,
				 u16 peer_id,
				 bool frag)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu, *temp;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_attention *rxd_attention;
	struct rx_mpdu_start *rxd_mpdu_start;

	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;
	bool frag_pn_check = true, multicast_check = true;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);

	is_mgmt = !!(rxd_attention->flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)last->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)last->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
	attention = __le32_to_cpu(rxd_attention->flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Report the most severe error class to the caller, if requested. */
	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		if (frag && !fill_crypt_header && is_decrypted &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
								      msdu,
								      peer_id,
								      enctype);

		if (frag)
			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
									       msdu);

		if (!frag_pn_check || !multicast_check) {
			/* Discard the fragment with invalid PN or multicast DA
			 */
			temp = msdu->prev;
			__skb_unlink(msdu, amsdu);
			dev_kfree_skb_any(msdu);
			msdu = temp;
			frag_pn_check = true;
			multicast_check = true;
			continue;
		}

		ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_MMIC_STRIPPED;

		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);

		/* ~A & ~B == ~(A | B): clears both flags at once */
		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_IV_STRIPPED &
					~RX_FLAG_MMIC_STRIPPED;
	}
}

/* Dequeue every MSDU of the A-MSDU, tagging per-MSDU flags (AMSDU_MORE,
 * ALLOW_SAME_PN for non-first subframes) and queueing each for delivery.
 */
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

/* Coalesce a chained MSDU into its first skb by growing its tailroom and
 * copying the remaining skbs in; on allocation failure the list is
 * restored and -1 is returned. unchain_cnt is bumped by the number of
 * merged buffers.
 */
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: Might could optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once.
	 */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

/* Re-assemble chained raw-decap MSDUs into a single frame, or drop the
 * whole chain (bumping *drop_cnt) when the decap format is not raw or
 * the chain length does not match the descriptor's more-count.
 */
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_start_common *rxd_msdu_start_common;
	struct rx_frag_info_common *rxd_frag_info;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	/* The rx descriptor sits immediately before the frame payload. */
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
	rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
	decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

/* Check that a multi-subframe A-MSDU has a plausible first subframe
 * (see the comment near the rfc1042 check below for the attack this
 * defends against). Returns true when the A-MSDU may be processed.
 */
static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
					 struct sk_buff_head *amsdu)
{
	u8 *subframe_hdr;
	struct sk_buff *first;
	bool is_first, is_last;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;
	struct rx_mpdu_start *rxd_mpdu_start;
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	enum htt_rx_mpdu_encrypt_type enctype;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	first = skb_peek(amsdu);

	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)first->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)first->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
	hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);

	is_first = !!(rxd_msdu_end_common->info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd_msdu_end_common->info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Return in case of non-aggregated msdu */
	if (is_first && is_last)
		return true;

	/* First msdu flag is not set for the first msdu of the list */
	if (!is_first)
		return false;

	enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	/* First A-MSDU subframe header starts after the (aligned) 802.11
	 * header and the crypto parameters.
	 */
	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
		       crypto_len;

	/* Validate if the amsdu
 has a proper first subframe.
	 * There are chances a single msdu can be received as amsdu when
	 * the unauthenticated amsdu flag of a QoS header
	 * gets flipped in non-SPP AMSDU's, in such cases the first
	 * subframe has llc/snap header in place of a valid da.
	 * return false if the da matches rfc1042 pattern
	 */
	if (ether_addr_equal(subframe_hdr, rfc1042_header))
		return false;

	return true;
}

/* Gate-keeper for an rx A-MSDU: reject when no channel is known, while
 * CAC (channel availability check) is running, or when first-subframe
 * validation fails.
 */
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
		return false;
	}

	return true;
}

/* Purge the whole A-MSDU (bumping *drop_cnt when provided) unless it is
 * allowed through by ath10k_htt_rx_amsdu_allowed().
 */
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}

/* Pop one A-MSDU from the rx ring and run it through the low-latency rx
 * pipeline: PPDU status, optional unchaining, filtering, MPDU processing
 * and delivery to mac80211. Returns 0 or a negative errno; marks the rx
 * path confused (fatal) when the ring is corrupted.
 */
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* only for ret = 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
			     false);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}

/* Extract the 24- or 48-bit PN from a high-latency rx descriptor into
 * *pn, selected by pn_len_bits (unknown widths leave *pn untouched).
 */
static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
					  union htt_rx_pn_t *pn,
					  int pn_len_bits)
{
	switch (pn_len_bits) {
	case 48:
		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
		break;
	case 24:
		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
		break;
	}
}

/* True when new_pn (48-bit) is not strictly greater than old_pn, i.e.
 * the frame looks like a replay.
 */
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
				   union htt_rx_pn_t *old_pn)
{
	return ((new_pn->pn48 & 0xffffffffffffULL) <=
		(old_pn->pn48 & 0xffffffffffffULL));
}

/* High-latency PN replay check. Returns true when the frame's PN is
 * invalid (replayed); on a valid PN the per-TID last PN is updated.
 * Caller must hold ar->data_lock.
 */
static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
					     struct ath10k_peer *peer,
					     struct htt_rx_indication_hl *rx)
{
	bool last_pn_valid, pn_invalid = false;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {};
	struct htt_hl_rx_desc *rx_desc;
	union htt_rx_pn_t *last_pn;
	u32 rx_desc_info, tid;
	int
 num_mpdu_ranges;

	lockdep_assert_held(&ar->data_lock);

	if (!peer)
		return false;

	/* Only the first MSDU of an MPDU carries a meaningful PN. */
	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
		return false;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	/* The HL rx descriptor follows the mpdu range array. */
	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
		return false;

	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* Only ciphers with a sequential PN/TSC can be replay-checked. */
	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC)
		return false;

	if (last_pn_valid)
		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
	else
		peer->tids_last_pn_valid[tid] = true;

	if (!pn_invalid)
		last_pn->pn48 = new_pn.pn48;

	return pn_invalid;
}

/* Process one high-latency rx indication and deliver the frame to
 * mac80211. Returns true when the caller still owns (and must free) the
 * skb, false when the skb was consumed.
 */
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb,
					 enum htt_rx_pn_check_type check_pn_type,
					 enum htt_rx_tkip_demic_type tkip_mic_type)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {};
	struct htt_hl_rx_desc *rx_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	u16 peer_id;
	u8 rx_desc_len;
	int num_mpdu_ranges;
	size_t tot_hdr_len;
	struct ieee80211_channel *ch;
	bool pn_invalid, qos, first_msdu;
	u32 tid, rx_desc_info;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
	if (!peer && peer_id != HTT_INVALID_PEERID)
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	if (!peer)
		return true;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	if (fw_desc->u.bits.discard) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
		goto err;
	}

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem handle that case either, so we introduce the
	 * same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
			    num_mpdu_ranges);

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK &&
	    mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
			   mpdu_ranges->mpdu_range_status);
		goto err;
	}

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;

	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* NOTE(review): the replay check runs only for tid >=
	 * IEEE80211_NUM_TIDS here (non-QoS TIDs); QoS TIDs are handled via
	 * the PN reconstruction below - confirm against firmware behavior.
	 */
	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
		spin_lock_bh(&ar->data_lock);
		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer,
 rx);
		spin_unlock_bh(&ar->data_lock);

		if (pn_invalid)
			goto err;
	}

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;

	skb_pull(skb, tot_hdr_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	qos = ieee80211_is_data_qos(hdr->frame_control);

	rx_status = IEEE80211_SKB_RXCB(skb);
	memset(rx_status, 0, sizeof(*rx_status));

	if (rx->ppdu.combined_rssi == 0) {
		/* SDIO firmware does not provide signal */
		rx_status->signal = 0;
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
	} else {
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			rx->ppdu.combined_rssi;
		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
	}

	/* Best-effort channel lookup: scan channel, then current rx
	 * channel, then any active peer channel, then target operating
	 * channel.
	 */
	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}
	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset has
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

		/* Rebuild a CCMP/TKIP IV in front of the payload from the
		 * firmware-reported PN so mac80211 can perform the replay
		 * check for QoS TIDs itself.
		 */
		if (tid < IEEE80211_NUM_TIDS &&
		    first_msdu &&
		    check_pn_type == HTT_RX_PN_CHECK &&
		   (sec_type == HTT_SECURITY_AES_CCMP ||
		    sec_type == HTT_SECURITY_TKIP ||
		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
			u8 offset, *ivp, i;
			s8 keyidx = 0;
			__le64 pn48 = cpu_to_le64(new_pn.pn48);

			hdr = (struct ieee80211_hdr *)skb->data;
			offset = ieee80211_hdrlen(hdr->frame_control);
			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;

			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
				skb->data, offset);
			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
			ivp = skb->data + offset;
			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
			/* Ext IV */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;

			/* Use the last pairwise key's index for the IV. */
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] &&
				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
					keyidx = peer->keys[i]->keyidx;
			}

			/* Key ID */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;

			if (sec_type == HTT_SECURITY_AES_CCMP) {
				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
				/* pn 0, pn 1 */
				memcpy(skb->data + offset, &pn48, 2);
				/* pn 1, pn 3 , pn 34 , pn 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			} else {
				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
				/* TSC 0 */
				memcpy(skb->data + offset + 2, &pn48, 1);
				/* TSC 1 */
				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
				/* TSC 2 , TSC 3 , TSC 4 , TSC 5*/
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			}
		}
	}

	/* ~A & ~B == ~(A | B): clears both flags at once. */
	if (tkip_mic_type == HTT_RX_TKIP_MIC)
		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
				   ~RX_FLAG_MMIC_STRIPPED;

	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	/* Re-insert a QoS control field for QoS TIDs whose header lost it. */
	if (!qos && tid < IEEE80211_NUM_TIDS) {
		u8 offset;
		__le16 qos_ctrl = 0;

		hdr = (struct ieee80211_hdr *)skb->data;
		offset = ieee80211_hdrlen(hdr->frame_control);

		hdr->frame_control |=
 cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
		skb_push(skb, IEEE80211_QOS_CTL_LEN);
		qos_ctrl = cpu_to_le16(tid);
		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
	}

	if (ar->napi.dev)
		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
	else
		ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
	 */
	return false;
err:
	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}

/* Strip the TKIP IV and the (firmware-absent) MIC from a defragmented rx
 * frame. Returns 0 or -EINVAL when the ExtIV bit is not set.
 */
static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
					       u16 head_len,
					       u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
	return 0;
}

/* Strip the TKIP IV and trailing ICV from a defragmented rx frame (MIC
 * handled separately). Returns 0 or -EINVAL when the ExtIV bit is not set.
 */
static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
						 u16 head_len,
						 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
	return 0;
}

/* Strip the CCMP header and trailing MIC from a defragmented rx frame.
 * Returns 0 or -EINVAL when the ExtIV bit is not set.
 */
static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
					 u16 head_len,
					 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for CCMP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] &
 ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
	return 0;
}

/* Strip the WEP IV and trailing ICV from a defragmented rx frame. */
static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
					u16 head_len,
					u16 hdr_len)
{
	u8 *orig_hdr;

	orig_hdr = skb->data;

	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
		orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_WEP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
	return 0;
}

/* Process a high-latency rx fragment indication: validate the fragment
 * (DA, retry bit, PN/sequence continuity), undo the cipher header and
 * trailer, and forward the frame via ath10k_htt_rx_proc_rx_ind_hl().
 * Returns true when the caller still owns (and must free) the skb.
 */
static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
					      struct htt_rx_fragment_indication *rx,
					      struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
	enum htt_txrx_sec_cast_type sec_index;
	struct htt_rx_indication_hl *rx_hl;
	enum htt_security_types sec_type;
	u32 tid, frag, seq, rx_desc_info;
	union htt_rx_pn_t new_pn = {};
	struct htt_hl_rx_desc *rx_desc;
	u16 peer_id, sc, hdr_space;
	union htt_rx_pn_t *last_pn;
	struct ieee80211_hdr *hdr;
	int ret, num_mpdu_ranges;
	struct ath10k_peer *peer;
	struct htt_resp *resp;
	size_t tot_hdr_len;

	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_trim(skb, skb->len - FCS_LEN);

	peer_id = __le16_to_cpu(rx->peer_id);
	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
		goto err;
	}

	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	tot_hdr_len = sizeof(struct htt_resp_hdr) +
		      sizeof(rx_hl->hdr) +
		      sizeof(rx_hl->ppdu) +
		      sizeof(rx_hl->prefix) +
		      sizeof(rx_hl->fw_desc) +
		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;

	tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);

	if (is_multicast_ether_addr(hdr->addr1)) {
		/* Discard the fragment with multicast DA */
		goto err;
	}

	/* Unencrypted fragments need no decap/PN handling. */
	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	if (ieee80211_has_retry(hdr->frame_control))
		goto err;

	hdr_space = ieee80211_hdrlen(hdr->frame_control);
	sc = __le16_to_cpu(hdr->seq_ctrl);
	seq = IEEE80211_SEQ_TO_SN(sc);
	frag = sc & IEEE80211_SCTL_FRAG;

	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	/* Remove the per-cipher IV/MIC so the payload is plain 802.11. */
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
		tkip_mic = HTT_RX_TKIP_MIC;
		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
							    tot_hdr_len +
							    rx_hl->fw_desc.len,
							    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_TKIP_NOMIC:
		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
							  tot_hdr_len +
							  rx_hl->fw_desc.len,
							  hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_AES_CCMP:
		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
						    tot_hdr_len + rx_hl->fw_desc.len,
						    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_WEP128:
	case HTT_SECURITY_WEP104:
	case HTT_SECURITY_WEP40:
		ret = ath10k_htt_rx_frag_wep_decap(skb,
						   tot_hdr_len + rx_hl->fw_desc.len,
						   hdr_space);
		if (ret)
			goto err;
		break;
	default:
		break;
	}

	resp = (struct htt_resp *)(skb->data);

	if (sec_type !=
 HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	last_pn = &peer->frag_tids_last_pn[tid];

	if (frag == 0) {
		/* First fragment: establish the PN/sequence baseline. */
		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
			goto err;

		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
		/* Later CCMP fragments must match the sequence and have a
		 * strictly incrementing PN.
		 */
		if (seq != peer->frag_tids_seq[tid])
			goto err;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			goto err;

		last_pn->pn48 = new_pn.pn48;
		last_pn = &peer->tids_last_pn[tid];
		last_pn->pn48 = new_pn.pn48;
	}

	spin_unlock_bh(&ar->data_lock);

	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
					    HTT_RX_NON_PN_CHECK, tkip_mic);

err:
	spin_unlock_bh(&ar->data_lock);

	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}

/* Low-latency rx indication: count the announced MPDUs, defer actual
 * processing by bumping num_mpdus_ready, and feed A-MPDU stats.
 */
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}

/* Handle an HTT data tx completion: complete every reported MSDU id
 * (optionally with its ack RSSI) and, when present, credit per-peer
 * airtime from the trailing PPDU duration records.
 */
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id, *msdus;
	bool rssi_enabled = false;
	u8 msdu_count = 0, num_airtime_records, tid;
	int i, htt_pad = 0;
	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
	struct ath10k_peer *peer;
	u16 ppdu_info_offset = 0, peer_id;
	u32 tx_duration;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	msdu_count = resp->data_tx_completion.num_msdus;
	msdus = resp->data_tx_completion.msdus;
	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);

	if (rssi_enabled)
		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
							    resp);

	for (i = 0; i < msdu_count; i++) {
		msdu_id = msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		if (rssi_enabled) {
			/* Total no of MSDUs should be even,
			 * if odd MSDUs are sent firmware fills
			 * last msdu id with 0xffff
			 */
			if (msdu_count & 0x01) {
				msdu_id = msdus[msdu_count + i + 1 + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			} else {
				msdu_id = msdus[msdu_count + i + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			}
		}

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macro.
		 */
		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
			ath10k_txrx_tx_unref(htt, &tx_done);
		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}

	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
		return;

	/* Locate the PPDU duration records behind the (padded) msdu id
	 * array; offsets depend on RSSI reporting and optional PPID/PA
	 * words.
	 */
	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;

	if (rssi_enabled)
		ppdu_info_offset += ppdu_info_offset;

	if (resp->data_tx_completion.flags2 &
	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
		ppdu_info_offset += 2;

	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
					__le32_to_cpu(ppdu_info->info0));

	for (i = 0; i < num_airtime_records; i++) {
		struct htt_data_tx_ppdu_dur *ppdu_dur;
		u32 info0;

		ppdu_dur = &ppdu_info->ppdu_dur[i];
		info0 = __le32_to_cpu(ppdu_dur->info0);

		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
				    info0);
		rcu_read_lock();
		spin_lock_bh(&ar->data_lock);

		peer = ath10k_peer_find_by_id(ar, peer_id);
		if (!peer || !peer->sta) {
			spin_unlock_bh(&ar->data_lock);
			rcu_read_unlock();
			continue;
		}

		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
						IEEE80211_QOS_CTL_TID_MASK;
		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);

		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);

		spin_unlock_bh(&ar->data_lock);
		rcu_read_unlock();
	}
}

/* Handle an rx ADDBA event from firmware by starting an offloaded rx
 * block-ack session in mac80211.
 */
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp
*resp)3214{3215struct htt_rx_addba *ev = &resp->rx_addba;3216struct ath10k_peer *peer;3217struct ath10k_vif *arvif;3218u16 info0, tid, peer_id;32193220info0 = __le16_to_cpu(ev->info0);3221tid = MS(info0, HTT_RX_BA_INFO0_TID);3222peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);32233224ath10k_dbg(ar, ATH10K_DBG_HTT,3225"htt rx addba tid %u peer_id %u size %u\n",3226tid, peer_id, ev->window_size);32273228spin_lock_bh(&ar->data_lock);3229peer = ath10k_peer_find_by_id(ar, peer_id);3230if (!peer) {3231ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",3232peer_id);3233spin_unlock_bh(&ar->data_lock);3234return;3235}32363237arvif = ath10k_get_arvif(ar, peer->vdev_id);3238if (!arvif) {3239ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",3240peer->vdev_id);3241spin_unlock_bh(&ar->data_lock);3242return;3243}32443245ath10k_dbg(ar, ATH10K_DBG_HTT,3246"htt rx start rx ba session sta %pM tid %u size %u\n",3247peer->addr, tid, ev->window_size);32483249ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);3250spin_unlock_bh(&ar->data_lock);3251}32523253static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)3254{3255struct htt_rx_delba *ev = &resp->rx_delba;3256struct ath10k_peer *peer;3257struct ath10k_vif *arvif;3258u16 info0, tid, peer_id;32593260info0 = __le16_to_cpu(ev->info0);3261tid = MS(info0, HTT_RX_BA_INFO0_TID);3262peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);32633264ath10k_dbg(ar, ATH10K_DBG_HTT,3265"htt rx delba tid %u peer_id %u\n",3266tid, peer_id);32673268spin_lock_bh(&ar->data_lock);3269peer = ath10k_peer_find_by_id(ar, peer_id);3270if (!peer) {3271ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",3272peer_id);3273spin_unlock_bh(&ar->data_lock);3274return;3275}32763277arvif = ath10k_get_arvif(ar, peer->vdev_id);3278if (!arvif) {3279ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",3280peer->vdev_id);3281spin_unlock_bh(&ar->data_lock);3282return;3283}32843285ath10k_dbg(ar, 
 ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %u\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

/* Move MSDUs from list into amsdu up to and including the one flagged
 * LAST_MSDU. Returns 0 on a complete A-MSDU, -ENOBUFS when list is
 * empty, -EINVAL when amsdu is not empty, and -EAGAIN when the tail is
 * still missing (collected MSDUs are spliced back onto list).
 */
static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
				       struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
					    (void *)msdu->data -
#elif defined(__FreeBSD__)
					    (u8 *)msdu->data -
#endif
					    hw->rx_desc_ops->rx_desc_size);

		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
		if (rxd_msdu_end_common->info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	/* Re-check the tail: the loop may have drained list without ever
	 * seeing LAST_MSDU.
	 */
	msdu = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
#if defined(__linux__)
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#elif defined(__FreeBSD__)
				    (u8 *)msdu->data - hw->rx_desc_ops->rx_desc_size);
#endif

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

/* Clear the protected bit on an offloaded (pre-decrypted) frame so
 * mac80211 does not drop it, and mark the rx status accordingly.
 */
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

/* Deliver offloaded-rx MSDUs (which carry a short meta header instead of
 * a full rx descriptor) to mac80211, dropping oversized ones.
 */
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi.
Re-construct QoS Control3399* if possible later.3400*/34013402memset(status, 0, sizeof(*status));3403status->flag |= RX_FLAG_NO_SIGNAL_VAL;34043405ath10k_htt_rx_h_rx_offload_prot(status, msdu);3406ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);3407ath10k_htt_rx_h_queue_msdu(ar, status, msdu);3408}3409}34103411static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)3412{3413struct ath10k_htt *htt = &ar->htt;3414struct htt_resp *resp = (void *)skb->data;3415struct ieee80211_rx_status *status = &htt->rx_status;3416struct sk_buff_head list;3417struct sk_buff_head amsdu;3418u16 peer_id;3419u16 msdu_count;3420u8 vdev_id;3421u8 tid;3422bool offload;3423bool frag;3424int ret;34253426lockdep_assert_held(&htt->rx_ring.lock);34273428if (htt->rx_confused)3429return -EIO;34303431skb_pull(skb, sizeof(resp->hdr));3432skb_pull(skb, sizeof(resp->rx_in_ord_ind));34333434peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);3435msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);3436vdev_id = resp->rx_in_ord_ind.vdev_id;3437tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);3438offload = !!(resp->rx_in_ord_ind.info &3439HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);3440frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);34413442ath10k_dbg(ar, ATH10K_DBG_HTT,3443"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",3444vdev_id, peer_id, tid, offload, frag, msdu_count);34453446if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {3447ath10k_warn(ar, "dropping invalid in order rx indication\n");3448return -EINVAL;3449}34503451/* The event can deliver more than 1 A-MSDU. 
Each A-MSDU is later3452* extracted and processed.3453*/3454__skb_queue_head_init(&list);3455if (ar->hw_params.target_64bit)3456ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,3457&list);3458else3459ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,3460&list);34613462if (ret < 0) {3463ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);3464htt->rx_confused = true;3465return -EIO;3466}34673468/* Offloaded frames are very different and need to be handled3469* separately.3470*/3471if (offload)3472ath10k_htt_rx_h_rx_offload(ar, &list);34733474while (!skb_queue_empty(&list)) {3475__skb_queue_head_init(&amsdu);3476ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);3477switch (ret) {3478case 0:3479/* Note: The in-order indication may report interleaved3480* frames from different PPDUs meaning reported rx rate3481* to mac80211 isn't accurate/reliable. It's still3482* better to report something than nothing though. This3483* should still give an idea about rx rate to the user.3484*/3485ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);3486ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);3487ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,3488NULL, peer_id, frag);3489ath10k_htt_rx_h_enqueue(ar, &amsdu, status);3490break;3491case -EAGAIN:3492fallthrough;3493default:3494/* Should not happen. 
*/3495ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);3496htt->rx_confused = true;3497__skb_queue_purge(&list);3498return -EIO;3499}3500}3501return ret;3502}35033504static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,3505const __le32 *resp_ids,3506int num_resp_ids)3507{3508int i;3509u32 resp_id;35103511ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",3512num_resp_ids);35133514for (i = 0; i < num_resp_ids; i++) {3515resp_id = le32_to_cpu(resp_ids[i]);35163517ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",3518resp_id);35193520/* TODO: free resp_id */3521}3522}35233524static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)3525{3526struct ieee80211_hw *hw = ar->hw;3527struct ieee80211_txq *txq;3528struct htt_resp *resp = (struct htt_resp *)skb->data;3529struct htt_tx_fetch_record *record;3530size_t len;3531size_t max_num_bytes;3532size_t max_num_msdus;3533size_t num_bytes;3534size_t num_msdus;3535const __le32 *resp_ids;3536u16 num_records;3537u16 num_resp_ids;3538u16 peer_id;3539u8 tid;3540int ret;3541int i;3542bool may_tx;35433544ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");35453546len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);3547if (unlikely(skb->len < len)) {3548ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");3549return;3550}35513552num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);3553num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);35543555len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;3556len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;35573558if (unlikely(skb->len < len)) {3559ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");3560return;3561}35623563ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",3564num_records, num_resp_ids,3565le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));35663567if 
(!ar->htt.tx_q_state.enabled) {3568ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");3569return;3570}35713572if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {3573ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");3574return;3575}35763577rcu_read_lock();35783579for (i = 0; i < num_records; i++) {3580record = &resp->tx_fetch_ind.records[i];3581peer_id = MS(le16_to_cpu(record->info),3582HTT_TX_FETCH_RECORD_INFO_PEER_ID);3583tid = MS(le16_to_cpu(record->info),3584HTT_TX_FETCH_RECORD_INFO_TID);3585max_num_msdus = le16_to_cpu(record->num_msdus);3586max_num_bytes = le32_to_cpu(record->num_bytes);35873588ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",3589i, peer_id, tid, max_num_msdus, max_num_bytes);35903591if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||3592unlikely(tid >= ar->htt.tx_q_state.num_tids)) {3593ath10k_warn(ar, "received out of range peer_id %u tid %u\n",3594peer_id, tid);3595continue;3596}35973598spin_lock_bh(&ar->data_lock);3599txq = ath10k_mac_txq_lookup(ar, peer_id, tid);3600spin_unlock_bh(&ar->data_lock);36013602/* It is okay to release the lock and use txq because RCU read3603* lock is held.3604*/36053606if (unlikely(!txq)) {3607ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",3608peer_id, tid);3609continue;3610}36113612num_msdus = 0;3613num_bytes = 0;36143615ieee80211_txq_schedule_start(hw, txq->ac);3616may_tx = ieee80211_txq_may_transmit(hw, txq);3617while (num_msdus < max_num_msdus &&3618num_bytes < max_num_bytes) {3619if (!may_tx)3620break;36213622ret = ath10k_mac_tx_push_txq(hw, txq);3623if (ret < 0)3624break;36253626num_msdus++;3627num_bytes += ret;3628}3629ieee80211_return_txq(hw, txq, false);3630ieee80211_txq_schedule_end(hw, txq->ac);36313632record->num_msdus = cpu_to_le16(num_msdus);3633record->num_bytes = cpu_to_le32(num_bytes);36343635ath10k_htt_tx_txq_recalc(hw, txq);3636}36373638rcu_read_unlock();36393640resp_ids 
= ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);3641ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);36423643ret = ath10k_htt_tx_fetch_resp(ar,3644resp->tx_fetch_ind.token,3645resp->tx_fetch_ind.fetch_seq_num,3646resp->tx_fetch_ind.records,3647num_records);3648if (unlikely(ret)) {3649ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",3650le32_to_cpu(resp->tx_fetch_ind.token), ret);3651/* FIXME: request fw restart */3652}36533654ath10k_htt_tx_txq_sync(ar);3655}36563657static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,3658struct sk_buff *skb)3659{3660const struct htt_resp *resp = (void *)skb->data;3661size_t len;3662int num_resp_ids;36633664ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");36653666len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);3667if (unlikely(skb->len < len)) {3668ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");3669return;3670}36713672num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);3673len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;36743675if (unlikely(skb->len < len)) {3676ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");3677return;3678}36793680ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,3681resp->tx_fetch_confirm.resp_ids,3682num_resp_ids);3683}36843685static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,3686struct sk_buff *skb)3687{3688const struct htt_resp *resp = (void *)skb->data;3689const struct htt_tx_mode_switch_record *record;3690struct ieee80211_txq *txq;3691struct ath10k_txq *artxq;3692size_t len;3693size_t num_records;3694enum htt_tx_mode_switch_mode mode;3695bool enable;3696u16 info0;3697u16 info1;3698u16 threshold;3699u16 peer_id;3700u8 tid;3701int i;37023703ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");37043705len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);3706if (unlikely(skb->len < len)) {3707ath10k_warn(ar, 
"received corrupted tx_mode_switch_ind event: buffer too short\n");3708return;3709}37103711info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);3712info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);37133714enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);3715num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);3716mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);3717threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);37183719ath10k_dbg(ar, ATH10K_DBG_HTT,3720"htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",3721info0, info1, enable, num_records, mode, threshold);37223723len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;37243725if (unlikely(skb->len < len)) {3726ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");3727return;3728}37293730switch (mode) {3731case HTT_TX_MODE_SWITCH_PUSH:3732case HTT_TX_MODE_SWITCH_PUSH_PULL:3733break;3734default:3735ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",3736mode);3737return;3738}37393740if (!enable)3741return;37423743ar->htt.tx_q_state.enabled = enable;3744ar->htt.tx_q_state.mode = mode;3745ar->htt.tx_q_state.num_push_allowed = threshold;37463747rcu_read_lock();37483749for (i = 0; i < num_records; i++) {3750record = &resp->tx_mode_switch_ind.records[i];3751info0 = le16_to_cpu(record->info0);3752peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);3753tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);37543755if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||3756unlikely(tid >= ar->htt.tx_q_state.num_tids)) {3757ath10k_warn(ar, "received out of range peer_id %u tid %u\n",3758peer_id, tid);3759continue;3760}37613762spin_lock_bh(&ar->data_lock);3763txq = ath10k_mac_txq_lookup(ar, peer_id, tid);3764spin_unlock_bh(&ar->data_lock);37653766/* It is okay to release the lock and use txq because RCU read3767* lock is held.3768*/37693770if (unlikely(!txq)) 
{3771ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",3772peer_id, tid);3773continue;3774}37753776spin_lock_bh(&ar->htt.tx_lock);3777artxq = (void *)txq->drv_priv;3778artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);3779spin_unlock_bh(&ar->htt.tx_lock);3780}37813782rcu_read_unlock();37833784ath10k_mac_tx_push_pending(ar);3785}37863787void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)3788{3789bool release;37903791release = ath10k_htt_t2h_msg_handler(ar, skb);37923793/* Free the indication buffer */3794if (release)3795dev_kfree_skb_any(skb);3796}37973798static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)3799{3800static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,380118, 24, 36, 48, 54};3802int i;38033804for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {3805if (rate == legacy_rates[i])3806return i;3807}38083809ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);3810return -EINVAL;3811}38123813static void3814ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,3815struct ath10k_sta *arsta,3816struct ath10k_per_peer_tx_stats *pstats,3817s8 legacy_rate_idx)3818{3819struct rate_info *txrate = &arsta->txrate;3820struct ath10k_htt_tx_stats *tx_stats;3821int idx, ht_idx, gi, mcs, bw, nss;3822unsigned long flags;38233824if (!arsta->tx_stats)3825return;38263827tx_stats = arsta->tx_stats;3828flags = txrate->flags;3829gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);3830mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);3831bw = txrate->bw;3832nss = txrate->nss;3833ht_idx = mcs + (nss - 1) * 8;3834idx = mcs * 8 + 8 * 10 * (nss - 1);3835idx += bw * 2 + gi;38363837#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]38383839if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {3840STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;3841STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;3842STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;3843STATS_OP_FMT(FAIL).vht[1][mcs] += 
pstats->failed_pkts;3844STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;3845STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;3846} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {3847STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;3848STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;3849STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;3850STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;3851STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;3852STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;3853} else {3854mcs = legacy_rate_idx;38553856STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;3857STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;3858STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;3859STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;3860STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;3861STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;3862}38633864if (ATH10K_HW_AMPDU(pstats->flags)) {3865tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);38663867if (txrate->flags & RATE_INFO_FLAGS_MCS) {3868STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=3869pstats->succ_bytes + pstats->retry_bytes;3870STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=3871pstats->succ_pkts + pstats->retry_pkts;3872} else {3873STATS_OP_FMT(AMPDU).vht[0][mcs] +=3874pstats->succ_bytes + pstats->retry_bytes;3875STATS_OP_FMT(AMPDU).vht[1][mcs] +=3876pstats->succ_pkts + pstats->retry_pkts;3877}3878STATS_OP_FMT(AMPDU).bw[0][bw] +=3879pstats->succ_bytes + pstats->retry_bytes;3880STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=3881pstats->succ_bytes + pstats->retry_bytes;3882STATS_OP_FMT(AMPDU).gi[0][gi] +=3883pstats->succ_bytes + pstats->retry_bytes;3884STATS_OP_FMT(AMPDU).rate_table[0][idx] +=3885pstats->succ_bytes + pstats->retry_bytes;3886STATS_OP_FMT(AMPDU).bw[1][bw] +=3887pstats->succ_pkts + pstats->retry_pkts;3888STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=3889pstats->succ_pkts + 
pstats->retry_pkts;3890STATS_OP_FMT(AMPDU).gi[1][gi] +=3891pstats->succ_pkts + pstats->retry_pkts;3892STATS_OP_FMT(AMPDU).rate_table[1][idx] +=3893pstats->succ_pkts + pstats->retry_pkts;3894} else {3895tx_stats->ack_fails +=3896ATH10K_HW_BA_FAIL(pstats->flags);3897}38983899STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;3900STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;3901STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;39023903STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;3904STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;3905STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;39063907STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;3908STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;3909STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;39103911STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;3912STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;3913STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;39143915STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;3916STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;3917STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;39183919STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;3920STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;3921STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;39223923if (txrate->flags >= RATE_INFO_FLAGS_MCS) {3924STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;3925STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;3926STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;3927STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;3928STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;3929STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;3930}39313932tx_stats->tx_duration += pstats->duration;3933}39343935static void3936ath10k_update_per_peer_tx_stats(struct ath10k *ar,3937struct ieee80211_sta *sta,3938struct ath10k_per_peer_tx_stats *peer_stats)3939{3940struct 
ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	struct ieee80211_chanctx_conf *conf = NULL;
	u8 rate = 0, sgi;
	s8 rate_idx = 0;
	bool skip_auto_rate;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	/* Unpack preamble/bw/nss/mcs fields from the firmware ratecode. */
	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);
	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);

	/* Firmware's rate control skips broadcast/management frames,
	 * if host has configure fixed rates and in some other special cases.
	 */
	if (skip_auto_rate)
		return;

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = 5;
		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
		if (rate_idx < 0)
			return;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		/* mac80211 HT MCS index folds nss into the mcs number. */
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	/* Fill tx_info.status.rates[0] for ieee80211_tx_rate_update(). */
	switch (txrate.flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
		/* 5 GHz has no CCK rates; OFDM indices start 4 entries in. */
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
				 IEEE80211_TX_RC_SHORT_GI);
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->tx_info.status.rates[0].idx =
				txrate.mcs + ((txrate.nss - 1) * 8);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
				       txrate.mcs, txrate.nss);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
						IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	}

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	switch (arsta->txrate.bw) {
	case RATE_INFO_BW_40:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_80:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_160:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}

	if (peer_stats->succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	if (ar->htt.disable_tx_comp) {
		arsta->tx_failed += peer_stats->failed_pkts;
		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
			   arsta->tx_failed);
	}

	arsta->tx_retries += peer_stats->retry_pkts;
	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);

	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
						    rate_idx);
}

/* Handle an HTT PEER_STATS event: validate the buffer, look up the peer,
 * and feed each per-PPDU stats record to ath10k_update_per_peer_tx_stats().
 */
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

/* Parse tx stats delivered via a 10.2 firmware pktlog record and feed
 * them to ath10k_update_per_peer_tx_stats().  Ignores records that are
 * not of type ATH_PKTLOG_TYPE_TX_STAT.
 */
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

/* Return the PN length (in bits) used for rx PN checking for the given
 * security type; 0 for types without PN enforcement here.
 */
static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}

static void
ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
			      struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	/* Unicast and multicast keys are tracked separately. */
	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	/* New key: reset the last-seen PN state for all TIDs. */
	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}

/* Dispatch a target-to-host HTT message to its handler.
 *
 * Returns true if the caller should free @skb, false if a handler took
 * ownership of it (e.g. queued it for deferred processing).
 */
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	/* Translate the firmware-specific message id to the abstract enum. */
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			/* High-latency rx is processed from NAPI context;
			 * the queue takes ownership of skb.
			 */
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits++;
			spin_unlock_bh(&htc->tx_lock);
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		/* Return value decides whether the caller frees skb. */
		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		/* Deferred to NAPI context; queue takes ownership of skb. */
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
		int htt_credit_delta;

		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
			htt_credit_delta = -htt_credit_delta;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt credit update delta %d\n",
			   htt_credit_delta);

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits += htt_credit_delta;
			spin_unlock_bh(&htc->tx_lock);
			ath10k_dbg(ar, ATH10K_DBG_HTT,
				   "htt credit total %d\n",
				   ep->tx_credits);
			ep->ep_ops.ep_tx_credits(htc->ar);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		/* Copy so the original indication buffer can be freed while
		 * the fetch is processed from the worker queue.
		 */
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

/* Trace a pktlog completion buffer and free it. */
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff
*skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

/* Deliver queued msdus from htt.rx_msdus_q up to mac80211 until the NAPI
 * budget is reached or the queue empties.  Returns the updated quota
 * (units of NAPI work consumed so far).
 */
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

/* Process up to @budget queued HL rx indications.  Returns the number
 * actually handled.
 */
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		/* Free the skb only when the handler released ownership
		 * back to us.
		 */
		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);

/* NAPI poll body for HTT tx/rx completions.  Returns the amount of budget
 * consumed; returning the full budget causes NAPI to reschedule.
 */
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		/* In-order indications touch the rx ring; serialize with
		 * other ring users via rx_ring.lock.
		 */
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret ==
-EIO) {
			/* Ring error: bail out and ask NAPI to poll again. */
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macro.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	/* Splice pending tx fetch indications onto a local list under the
	 * queue lock, then process them without holding it.
	 */
	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ?
budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

/* Rx ring ops for targets addressed via 32-bit descriptors. */
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

/* Rx ring ops for targets addressed via 64-bit descriptors. */
static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

/* HL (high-latency) targets only provide the frag-indication hook;
 * they have no host-managed rx descriptor ring.
 */
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};

/* Select the rx ops table matching the device type (HL vs LL) and the
 * target's descriptor addressing width.
 */
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}