Path: blob/main/sys/contrib/dev/athk/ath11k/dp_rx.c
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"

#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

static inline
u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}

static inline
enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
							 struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}

static inline
bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
					    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
}

static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}

static inline
bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
							struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
						    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
						   struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}

static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
			   __le32_to_cpu(attn->info2));
}

static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
			   __le32_to_cpu(attn->info1));
}

static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
			  __le32_to_cpu(attn->info2)) ==
		RX_DESC_DECRYPT_STATUS_CODE_OK);
}

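/* Collapse the per-MPDU error bits reported in the rx_attention TLV into the
 * driver's DP_RX_MPDU_ERR_* bitmap so callers can test for FCS, decrypt,
 * TKIP MIC, A-MSDU, overflow and length errors with a single mask.
 */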
static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
{
	u32 info = __le32_to_cpu(attn->info1);
	u32 errmap = 0;

	if (info & RX_ATTENTION_INFO1_FCS_ERR)
		errmap |= DP_RX_MPDU_ERR_FCS;

	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
		errmap |= DP_RX_MPDU_ERR_DECRYPT;

	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;

	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;

	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
		errmap |= DP_RX_MPDU_ERR_OVERFLOW;

	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;

	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;

	return errmap;
}

static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	struct rx_attention *rx_attention;
	u32 errmap;

	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);

	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}

static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
						     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}

static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}

static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}

static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
						    struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}

static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
					       struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}

static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
						      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}

static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
					      struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
}

static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}

static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
			 __le32_to_cpu(attn->info1));
}

static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
						struct hal_rx_desc *rx_desc)
{
	u8 *rx_pkt_hdr;

	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);

	return rx_pkt_hdr;
}

static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
					       struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
					      struct hal_rx_desc *rx_desc)
{
	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
						 struct hal_rx_desc *desc,
						 u16 len)
{
	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}

static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
					struct hal_rx_desc *desc)
{
	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);

	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
			     __le32_to_cpu(attn->info1)));
}

static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
}

static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
{
	int i, reaped = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);

	do {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
								 NULL,
								 DP_MON_SERVICE_BUDGET);

		/* nothing more to reap */
		if (reaped < DP_MON_SERVICE_BUDGET)
			return 0;

	} while (time_before(jiffies, timeout));

	ath11k_warn(ab, "dp mon ring purge timeout");

	return -ETIMEDOUT;
}

/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
			       struct dp_rxdma_ring *rx_ring,
			       int req_entries,
			       enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

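		/* Hand the buffer to RXDMA: map it for device DMA and post its
		 * address together with an IDR-backed cookie to the refill
		 * ring; on any failure the buffer is unwound below.
		 */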
		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		spin_lock_bh(&rx_ring->idr_lock);
		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
		spin_unlock_bh(&rx_ring->idr_lock);
		if (buf_id <= 0)
			goto fail_dma_unmap;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_idr_remove;

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_idr_remove:
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
					 struct dp_rxdma_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	rx_ring = &dp->rxdma_mon_buf_ring;
	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
	}

	return 0;
}

static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
					  struct dp_rxdma_ring *rx_ring,
					  u32 ringtype)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		      ath11k_hal_srng_get_entrysize(ar->ab, ringtype);

	rx_ring->bufs_max = num_entries;
	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
				   ar->ab->hw_params.hal_params->rx_buf_rbm);
	return 0;
}

static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
	int i;

	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);

	if (ar->ab->hw_params.rxdma1_enable) {
		rx_ring = &dp->rxdma_mon_buf_ring;
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		rx_ring = &dp->rx_mon_status_refill_ring[i];
		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
	}

	return 0;
}

static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	int i;

	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		if (ab->hw_params.rx_mac_buf_ring)
			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);

		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
		ath11k_dp_srng_cleanup(ab,
				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
	}

	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}

void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath11k_dp_pdev_reo_cleanup(ab);

	return ret;
}

static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct dp_srng *srng = NULL;
	int i;
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0,
				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab,
					   srng,
					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to setup rx_mon_status_refill_ring %d\n", i);
			return ret;
		}
	}

	/* if rxdma1_enable is false, then it doesn't need
	 * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
	 * and rxdma_mon_desc_ring.
	 * init reap timer for QCA6390.
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr_unaligned) {
			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
					     rx_tid->vaddr_unaligned,
					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
			rx_tid->vaddr_unaligned = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr_unaligned) {
			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
					     rx_tid->vaddr_unaligned,
					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
			rx_tid->vaddr_unaligned = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);
	if (rx_tid->vaddr_unaligned) {
		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
				     rx_tid->vaddr_unaligned,
				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
		rx_tid->vaddr_unaligned = NULL;
	}
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct ath11k_hal_reo_cmd cmd = {};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->size;
	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_FLUSH_CACHE, &cmd,
						NULL);
		if (ret)
			ath11k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
					HAL_REO_CMD_FLUSH_CACHE,
					&cmd, ath11k_dp_reo_cmd_free);
	if (ret) {
		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
				     rx_tid->vaddr_unaligned,
				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
		rx_tid->vaddr_unaligned = NULL;
	}
}

static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath11k_base *ab = dp->ab;
	struct dp_rx_tid *rx_tid = ctx;
	struct dp_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath11k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
			     rx_tid->vaddr_unaligned,
			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
	rx_tid->vaddr_unaligned = NULL;
}

void ath11k_peer_rx_tid_delete(struct ath11k *ar,
			       struct ath11k_peer *peer, u8 tid)
{
	struct ath11k_hal_reo_cmd cmd = {};
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	rx_tid->active = false;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					ath11k_dp_rx_tid_del_func);
	if (ret) {
		if (ret != -ESHUTDOWN)
			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
				   tid, ret);
		dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
				     rx_tid->vaddr_unaligned,
				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
		rx_tid->vaddr_unaligned = NULL;
	}

	rx_tid->paddr = 0;
	rx_tid->paddr_unaligned = 0;
	rx_tid->size = 0;
	rx_tid->unaligned_size = 0;
}

static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
					 u32 *link_desc,
					 enum hal_wbm_rel_bm_act action)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	u32 *desc;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
					 action);

exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
{
	struct ath11k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc)
			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
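		/* The fragment timer handler itself takes base_lock, so the
		 * lock is dropped while the timer is stopped synchronously,
		 * then the queued fragments for this TID are purged.
		 */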
		rx_tid = &peer->rx_tid[i];

		spin_unlock_bh(&ar->ab->base_lock);
		timer_delete_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);

		ath11k_dp_rx_frags_cleanup(rx_tid, true);
	}
}

void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath11k_peer_rx_tid_delete(ar, peer, i);
		ath11k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		timer_delete_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
					 struct ath11k_peer *peer,
					 struct dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath11k_hal_reo_cmd cmd = {};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
	}

	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
					NULL);
	if (ret) {
		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
				      const u8 *peer_mac, int vdev_id, u8 tid)
{
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
		goto unlock_exit;
	}

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->active)
		goto unlock_exit;

	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
	rx_tid->vaddr_unaligned = NULL;

	rx_tid->active = false;

unlock_exit:
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz, *vaddr;
	void *vaddr_unaligned;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
			    peer_mac);
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
						DMA_BIDIRECTIONAL, GFP_ATOMIC);
	if (!vaddr_unaligned) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	rx_tid->vaddr_unaligned = vaddr_unaligned;
	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
	rx_tid->paddr_unaligned = paddr;
	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
			(unsigned long)rx_tid->vaddr_unaligned);
	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	/* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup.
	 * Since these changes are not reflected in the device, driver now needs to
	 * explicitly call dma_sync_single_for_device.
	 */
	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
				   rx_tid->size,
				   DMA_TO_DEVICE);
	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
						     tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
			    peer_mac, tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
			    ret);

	return ret;
}

int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
					     u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

#if defined(__linux__)
		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
#elif defined(__FreeBSD__)
		peer_id = ((const struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
#endif
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

#if defined(__linux__)
		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
#elif defined(__FreeBSD__)
		peer_id = ((const struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
#endif
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
#if defined(__linux__)
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
#elif defined(__FreeBSD__)
		((const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
#endif
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

#if defined(__linux__)
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
#elif defined(__FreeBSD__)
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const u8 *ptr, size_t len,
#endif
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
#if defined(__linux__)
	const void *begin = ptr;
#elif defined(__FreeBSD__)
	const u8 *begin = ptr;
#endif
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
#if defined(__linux__)
		tlv = (struct htt_tlv *)ptr;
#elif defined(__FreeBSD__)
		tlv = (const struct htt_tlv *)(const void *)ptr;
#endif
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

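	/* The USR_RATE TLV is required for any rate accounting; without it
	 * there is nothing to report for this user.
	 */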
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
						((user_rate->ru_end -
						  user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);

	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto out;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto out_unlock_data;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto out_unlock_data;
	}

out_unlock_data:
	spin_unlock_bh(&ar->data_lock);

out:
	rcu_read_unlock();

	return ret;
}

static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
	struct ath11k *ar;
	u8 pdev_id;

	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);

	rcu_read_lock();

	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
		goto out;
	}

	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
				ar->ab->pktlog_defs_checksum);

out:
	rcu_read_unlock();
}

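/* Decode an HTT backpressure event: the first word carries pdev id, ring type
 * and ring id, the second the head/tail pointers, the third the backpressure
 * time; the result is accumulated in the matching per-ring stats.
 */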
static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
						  struct sk_buff *skb)
{
	u32 *data = (u32 *)skb->data;
	u8 pdev_id, ring_type, ring_id, pdev_idx;
	u16 hp, tp;
	u32 backpressure_time;
	struct ath11k_bp_stats *bp_stats;

	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
	++data;

	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
	++data;

	backpressure_time = *data;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);

	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
			return;

		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
		pdev_idx = DP_HW2SW_MACID(pdev_id);

		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
			return;

		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
	} else {
		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
			    ring_type);
		return;
	}

	spin_lock_bh(&ab->base_lock);
	bp_stats->hp = hp;
	bp_stats->tp = tp;
	bp_stats->count++;
	bp_stats->jiffies = jiffies;
	spin_unlock_bh(&ab->base_lock);
}

void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	struct ath11k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash;
	u16 hw_peer_id;

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
						  resp->version_msg.version);
		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
						  resp->version_msg.version);
		complete(&dp->htt_tgt_version_received);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
				    resp->peer_map_ev.info);
		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
				    resp->peer_map_ev.info);
		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
					 resp->peer_map_ev.info1);
		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
				       peer_mac_h16, mac_addr);
		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
				     resp->peer_map_ev.info2);
		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
				       resp->peer_map_ev.info1);
		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
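		/* PEER_UNMAP and PEER_UNMAP2 carry the peer id in the same
		 * info field, so both unmap variants are handled identically.
		 */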
		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
				    resp->peer_unmap_ev.info);
		ath11k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath11k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG:
		ath11k_htt_pktlog(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
		ath11k_htt_backpressure_event_handler(ab, skb);
		break;
	default:
		ath11k_warn(ab, "htt event %d not handled\n", type);
		break;
	}

	dev_kfree_skb_any(skb);
}

static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spreads over multiple buffers, the attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct rx_attention *rx_attention;
	bool ip_csum_fail, l4_csum_fail;

	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/* Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
#if defined(__linux__)
		       (void *)hdr + hdr_len,
#elif defined(__FreeBSD__)
		       (u8 *)hdr + hdr_len,
#endif
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
		       IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
enctype,2058struct ieee80211_rx_status *status,2059bool decrypted)2060{2061struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);2062struct ieee80211_hdr *hdr;2063size_t hdr_len;2064size_t crypto_len;20652066if (!rxcb->is_first_msdu ||2067!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {2068WARN_ON_ONCE(1);2069return;2070}20712072skb_trim(msdu, msdu->len - FCS_LEN);20732074if (!decrypted)2075return;20762077hdr = (void *)msdu->data;20782079/* Tail */2080if (status->flag & RX_FLAG_IV_STRIPPED) {2081skb_trim(msdu, msdu->len -2082ath11k_dp_rx_crypto_mic_len(ar, enctype));20832084skb_trim(msdu, msdu->len -2085ath11k_dp_rx_crypto_icv_len(ar, enctype));2086} else {2087/* MIC */2088if (status->flag & RX_FLAG_MIC_STRIPPED)2089skb_trim(msdu, msdu->len -2090ath11k_dp_rx_crypto_mic_len(ar, enctype));20912092/* ICV */2093if (status->flag & RX_FLAG_ICV_STRIPPED)2094skb_trim(msdu, msdu->len -2095ath11k_dp_rx_crypto_icv_len(ar, enctype));2096}20972098/* MMIC */2099if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&2100!ieee80211_has_morefrags(hdr->frame_control) &&2101enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)2102skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);21032104/* Head */2105if (status->flag & RX_FLAG_IV_STRIPPED) {2106hdr_len = ieee80211_hdrlen(hdr->frame_control);2107crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);21082109#if defined(__linux__)2110memmove((void *)msdu->data + crypto_len,2111(void *)msdu->data, hdr_len);2112#elif defined(__FreeBSD__)2113memmove((u8 *)msdu->data + crypto_len,2114(u8 *)msdu->data, hdr_len);2115#endif2116skb_pull(msdu, crypto_len);2117}2118}21192120static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,2121struct sk_buff *msdu,2122enum hal_encrypt_type enctype)2123{2124struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);2125struct ieee80211_hdr *hdr;2126size_t hdr_len, crypto_len;2127#if defined(__linux__)2128void *rfc1042;2129#elif defined(__FreeBSD__)2130u8 *rfc1042;2131#endif2132bool is_amsdu;21332134is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);2135hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);2136#if defined(__linux__)2137rfc1042 = hdr;2138#elif defined(__FreeBSD__)2139rfc1042 = (void *)hdr;2140#endif21412142if (rxcb->is_first_msdu) {2143hdr_len = ieee80211_hdrlen(hdr->frame_control);2144crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);21452146rfc1042 += hdr_len + crypto_len;2147}21482149if (is_amsdu)2150rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);21512152return rfc1042;2153}21542155static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,2156struct sk_buff *msdu,2157u8 *first_hdr,2158enum hal_encrypt_type enctype,2159struct ieee80211_rx_status *status)2160{2161struct ieee80211_hdr *hdr;2162struct ethhdr *eth;2163size_t hdr_len;2164u8 da[ETH_ALEN];2165u8 sa[ETH_ALEN];2166void *rfc1042;21672168rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);2169if (WARN_ON_ONCE(!rfc1042))2170return;21712172/* pull decapped header and copy SA & DA */2173eth = (struct ethhdr *)msdu->data;2174ether_addr_copy(da, eth->h_dest);2175ether_addr_copy(sa, eth->h_source);2176skb_pull(msdu, sizeof(struct ethhdr));21772178/* push rfc1042/llc/snap */2179memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,2180sizeof(struct ath11k_dp_rfc1042_hdr));21812182/* push original 802.11 header */2183hdr = (struct ieee80211_hdr *)first_hdr;2184hdr_len = ieee80211_hdrlen(hdr->frame_control);21852186if (!(status->flag & RX_FLAG_IV_STRIPPED)) {2187memcpy(skb_push(msdu,2188ath11k_dp_rx_crypto_param_len(ar, 
enctype)),2189#if defined(__linux__)2190(void *)hdr + hdr_len,2191#elif defined(__FreeBSD__)2192(u8 *)hdr + hdr_len,2193#endif2194ath11k_dp_rx_crypto_param_len(ar, enctype));2195}21962197memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);21982199/* original 802.11 header has a different DA and in2200* case of 4addr it may also have different SA2201*/2202hdr = (struct ieee80211_hdr *)msdu->data;2203ether_addr_copy(ieee80211_get_DA(hdr), da);2204ether_addr_copy(ieee80211_get_SA(hdr), sa);2205}22062207static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,2208struct hal_rx_desc *rx_desc,2209enum hal_encrypt_type enctype,2210struct ieee80211_rx_status *status,2211bool decrypted)2212{2213u8 *first_hdr;2214u8 decap;2215struct ethhdr *ehdr;22162217first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);2218decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);22192220switch (decap) {2221case DP_RX_DECAP_TYPE_NATIVE_WIFI:2222ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,2223enctype, status);2224break;2225case DP_RX_DECAP_TYPE_RAW:2226ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,2227decrypted);2228break;2229case DP_RX_DECAP_TYPE_ETHERNET2_DIX:2230ehdr = (struct ethhdr *)msdu->data;22312232/* mac80211 allows fast path only for authorized STA */2233if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {2234ATH11K_SKB_RXCB(msdu)->is_eapol = true;2235ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,2236enctype, status);2237break;2238}22392240/* PN for mcast packets will be validated in mac80211;2241* remove eth header and add 802.11 header.2242*/2243if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)2244ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,2245enctype, status);2246break;2247case DP_RX_DECAP_TYPE_8023:2248/* TODO: Handle undecap for these formats */2249break;2250}2251}22522253static struct ath11k_peer *2254ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)2255{2256struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);2257struct hal_rx_desc *rx_desc = rxcb->rx_desc;2258struct ath11k_peer *peer = NULL;22592260lockdep_assert_held(&ab->base_lock);22612262if (rxcb->peer_id)2263peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);22642265if (peer)2266return peer;22672268if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))2269return NULL;22702271peer = ath11k_peer_find_by_addr(ab,2272ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));2273return peer;2274}22752276static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,2277struct sk_buff *msdu,2278struct hal_rx_desc *rx_desc,2279struct ieee80211_rx_status *rx_status)2280{2281bool fill_crypto_hdr;2282enum hal_encrypt_type enctype;2283bool is_decrypted = false;2284struct ath11k_skb_rxcb *rxcb;2285struct ieee80211_hdr *hdr;2286struct ath11k_peer *peer;2287struct rx_attention *rx_attention;2288u32 err_bitmap;22892290/* PN for multicast packets will be checked in mac80211 */2291rxcb = ATH11K_SKB_RXCB(msdu);2292fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);2293rxcb->is_mcbc = fill_crypto_hdr;22942295if (rxcb->is_mcbc) {2296rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);2297rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);2298}22992300spin_lock_bh(&ar->ab->base_lock);2301peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);2302if (peer) {2303if (rxcb->is_mcbc)2304enctype = peer->sec_type_grp;2305else2306enctype = peer->sec_type;2307} else {2308enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);2309}2310spin_unlock_bh(&ar->ab->base_lock);23112312rx_attention = 
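/* The attention TLV in the rx descriptor carries the MPDU error bitmap
 * and the hardware decryption status evaluated below.
 */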
ath11k_dp_rx_get_attention(ar->ab, rx_desc);2313err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);2314if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)2315is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);23162317/* Clear per-MPDU flags while leaving per-PPDU flags intact */2318rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |2319RX_FLAG_MMIC_ERROR |2320RX_FLAG_DECRYPTED |2321RX_FLAG_IV_STRIPPED |2322RX_FLAG_MMIC_STRIPPED);23232324if (err_bitmap & DP_RX_MPDU_ERR_FCS)2325rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;2326if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)2327rx_status->flag |= RX_FLAG_MMIC_ERROR;23282329if (is_decrypted) {2330rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;23312332if (fill_crypto_hdr)2333rx_status->flag |= RX_FLAG_MIC_STRIPPED |2334RX_FLAG_ICV_STRIPPED;2335else2336rx_status->flag |= RX_FLAG_IV_STRIPPED |2337RX_FLAG_PN_VALIDATED;2338}23392340ath11k_dp_rx_h_csum_offload(ar, msdu);2341ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,2342enctype, rx_status, is_decrypted);23432344if (!is_decrypted || fill_crypto_hdr)2345return;23462347if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=2348DP_RX_DECAP_TYPE_ETHERNET2_DIX) {2349hdr = (void *)msdu->data;2350hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);2351}2352}23532354static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,2355struct ieee80211_rx_status *rx_status)2356{2357struct ieee80211_supported_band *sband;2358enum rx_msdu_start_pkt_type pkt_type;2359u8 bw;2360u8 rate_mcs, nss;2361u8 sgi;2362bool is_cck, is_ldpc;23632364pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);2365bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);2366rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);2367nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);2368sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);23692370switch (pkt_type) {2371case RX_MSDU_START_PKT_TYPE_11A:2372case RX_MSDU_START_PKT_TYPE_11B:2373is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);2374sband = &ar->mac.sbands[rx_status->band];2375rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,2376is_cck);2377break;2378case RX_MSDU_START_PKT_TYPE_11N:2379rx_status->encoding = RX_ENC_HT;2380if (rate_mcs > ATH11K_HT_MCS_MAX) {2381ath11k_warn(ar->ab,2382"Received with invalid mcs in HT mode %d\n",2383rate_mcs);2384break;2385}2386rx_status->rate_idx = rate_mcs + (8 * (nss - 1));2387if (sgi)2388rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;2389rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);2390break;2391case RX_MSDU_START_PKT_TYPE_11AC:2392rx_status->encoding = RX_ENC_VHT;2393rx_status->rate_idx = rate_mcs;2394if (rate_mcs > ATH11K_VHT_MCS_MAX) {2395ath11k_warn(ar->ab,2396"Received with invalid mcs in VHT mode %d\n",2397rate_mcs);2398break;2399}2400rx_status->nss = nss;2401if (sgi)2402rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;2403rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);2404is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);2405if (is_ldpc)2406rx_status->enc_flags |= RX_ENC_FLAG_LDPC;2407break;2408case RX_MSDU_START_PKT_TYPE_11AX:2409rx_status->rate_idx = rate_mcs;2410if (rate_mcs > ATH11K_HE_MCS_MAX) {2411ath11k_warn(ar->ab,2412"Received with invalid mcs in HE mode %d\n",2413rate_mcs);2414break;2415}2416rx_status->encoding = RX_ENC_HE;2417rx_status->nss = nss;2418rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);2419rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);2420break;2421}2422}24232424static void ath11k_dp_rx_h_ppdu(struct ath11k 
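/* Fill the per-PPDU rx status fields: derive band and frequency from
 * the channel metadata in the rx descriptor, fall back to the current
 * rx channel when the channel number is not recognisable, then fill in
 * the rate fields.
 */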
*ar, struct hal_rx_desc *rx_desc,2425struct ieee80211_rx_status *rx_status)2426{2427u8 channel_num;2428u32 center_freq, meta_data;2429struct ieee80211_channel *channel;24302431rx_status->freq = 0;2432rx_status->rate_idx = 0;2433rx_status->nss = 0;2434rx_status->encoding = RX_ENC_LEGACY;2435rx_status->bw = RATE_INFO_BW_20;24362437rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;24382439meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);2440channel_num = meta_data;2441center_freq = meta_data >> 16;24422443if (center_freq >= ATH11K_MIN_6G_FREQ &&2444center_freq <= ATH11K_MAX_6G_FREQ) {2445rx_status->band = NL80211_BAND_6GHZ;2446rx_status->freq = center_freq;2447} else if (channel_num >= 1 && channel_num <= 14) {2448rx_status->band = NL80211_BAND_2GHZ;2449} else if (channel_num >= 36 && channel_num <= 177) {2450rx_status->band = NL80211_BAND_5GHZ;2451} else {2452spin_lock_bh(&ar->data_lock);2453channel = ar->rx_channel;2454if (channel) {2455rx_status->band = channel->band;2456channel_num =2457ieee80211_frequency_to_channel(channel->center_freq);2458}2459spin_unlock_bh(&ar->data_lock);2460ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",2461rx_desc, sizeof(struct hal_rx_desc));2462}24632464if (rx_status->band != NL80211_BAND_6GHZ)2465rx_status->freq = ieee80211_channel_to_frequency(channel_num,2466rx_status->band);24672468ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);2469}24702471static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,2472struct sk_buff *msdu,2473struct ieee80211_rx_status *status)2474{2475static const struct ieee80211_radiotap_he known = {2476.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |2477IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),2478.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),2479};2480struct ieee80211_rx_status *rx_status;2481struct ieee80211_radiotap_he *he = NULL;2482struct ieee80211_sta *pubsta = NULL;2483struct ath11k_peer *peer;2484struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);2485u8 decap = DP_RX_DECAP_TYPE_RAW;2486bool is_mcbc = rxcb->is_mcbc;2487bool is_eapol = rxcb->is_eapol;24882489if (status->encoding == RX_ENC_HE &&2490!(status->flag & RX_FLAG_RADIOTAP_HE) &&2491!(status->flag & RX_FLAG_SKIP_MONITOR)) {2492he = skb_push(msdu, sizeof(known));2493memcpy(he, &known, sizeof(known));2494status->flag |= RX_FLAG_RADIOTAP_HE;2495}24962497if (!(status->flag & RX_FLAG_ONLY_MONITOR))2498decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);24992500spin_lock_bh(&ar->ab->base_lock);2501peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);2502if (peer && peer->sta)2503pubsta = peer->sta;2504spin_unlock_bh(&ar->ab->base_lock);25052506ath11k_dbg(ar->ab, ATH11K_DBG_DATA,2507"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",2508msdu,2509msdu->len,2510peer ? peer->addr : NULL,2511rxcb->tid,2512is_mcbc ? "mcast" : "ucast",2513rxcb->seq_no,2514(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",2515(status->encoding == RX_ENC_HT) ? "ht" : "",2516(status->encoding == RX_ENC_VHT) ? "vht" : "",2517(status->encoding == RX_ENC_HE) ? "he" : "",2518(status->bw == RATE_INFO_BW_40) ? "40" : "",2519(status->bw == RATE_INFO_BW_80) ? "80" : "",2520(status->bw == RATE_INFO_BW_160) ? "160" : "",2521status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
"sgi " : "",2522status->rate_idx,2523status->nss,2524status->freq,2525status->band, status->flag,2526!!(status->flag & RX_FLAG_FAILED_FCS_CRC),2527!!(status->flag & RX_FLAG_MMIC_ERROR),2528!!(status->flag & RX_FLAG_AMSDU_MORE));25292530ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",2531msdu->data, msdu->len);25322533rx_status = IEEE80211_SKB_RXCB(msdu);2534*rx_status = *status;25352536/* TODO: trace rx packet */25372538/* PN for multicast packets are not validate in HW,2539* so skip 802.3 rx path2540* Also, fast_rx expects the STA to be authorized, hence2541* eapol packets are sent in slow path.2542*/2543if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&2544!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))2545rx_status->flag |= RX_FLAG_8023;25462547ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);2548}25492550static int ath11k_dp_rx_process_msdu(struct ath11k *ar,2551struct sk_buff *msdu,2552struct sk_buff_head *msdu_list,2553struct ieee80211_rx_status *rx_status)2554{2555struct ath11k_base *ab = ar->ab;2556struct hal_rx_desc *rx_desc, *lrx_desc;2557struct rx_attention *rx_attention;2558struct ath11k_skb_rxcb *rxcb;2559struct sk_buff *last_buf;2560u8 l3_pad_bytes;2561u8 *hdr_status;2562u16 msdu_len;2563int ret;2564u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;25652566last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);2567if (!last_buf) {2568ath11k_warn(ab,2569"No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");2570ret = -EIO;2571goto free_out;2572}25732574rx_desc = (struct hal_rx_desc *)msdu->data;2575if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {2576ath11k_warn(ar->ab, "msdu len not valid\n");2577ret = -EIO;2578goto free_out;2579}25802581lrx_desc = (struct hal_rx_desc *)last_buf->data;2582rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);2583if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {2584ath11k_warn(ab, "msdu_done bit in attention is not set\n");2585ret = -EIO;2586goto free_out;2587}25882589rxcb = ATH11K_SKB_RXCB(msdu);2590rxcb->rx_desc = rx_desc;2591msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);2592l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);25932594if (rxcb->is_frag) {2595skb_pull(msdu, hal_rx_desc_sz);2596} else if (!rxcb->is_continuation) {2597if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {2598hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);2599ret = -EINVAL;2600ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);2601ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,2602sizeof(struct ieee80211_hdr));2603ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,2604sizeof(struct hal_rx_desc));2605goto free_out;2606}2607skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);2608skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);2609} else {2610ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,2611msdu, last_buf,2612l3_pad_bytes, msdu_len);2613if (ret) {2614ath11k_warn(ab,2615"failed to coalesce msdu rx buffer%d\n", ret);2616goto free_out;2617}2618}26192620ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);2621ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);26222623rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;26242625return 0;26262627free_out:2628return ret;2629}26302631static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,2632struct napi_struct *napi,2633struct sk_buff_head *msdu_list,2634int mac_id)2635{2636struct sk_buff *msdu;2637struct ath11k *ar;2638struct ieee80211_rx_status rx_status = {};2639int ret;26402641if 
(skb_queue_empty(msdu_list))2642return;26432644if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {2645__skb_queue_purge(msdu_list);2646return;2647}26482649ar = ab->pdevs[mac_id].ar;2650if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {2651__skb_queue_purge(msdu_list);2652return;2653}26542655while ((msdu = __skb_dequeue(msdu_list))) {2656ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);2657if (unlikely(ret)) {2658ath11k_dbg(ab, ATH11K_DBG_DATA,2659"Unable to process msdu %d", ret);2660dev_kfree_skb_any(msdu);2661continue;2662}26632664ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);2665}2666}26672668int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,2669struct napi_struct *napi, int budget)2670{2671struct ath11k_dp *dp = &ab->dp;2672struct dp_rxdma_ring *rx_ring;2673int num_buffs_reaped[MAX_RADIOS] = {};2674struct sk_buff_head msdu_list[MAX_RADIOS];2675struct ath11k_skb_rxcb *rxcb;2676int total_msdu_reaped = 0;2677struct hal_srng *srng;2678struct sk_buff *msdu;2679bool done = false;2680int buf_id, mac_id;2681struct ath11k *ar;2682struct hal_reo_dest_ring *desc;2683enum hal_reo_dest_ring_push_reason push_reason;2684u32 cookie;2685int i;26862687for (i = 0; i < MAX_RADIOS; i++)2688__skb_queue_head_init(&msdu_list[i]);26892690srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];26912692spin_lock_bh(&srng->lock);26932694try_again:2695ath11k_hal_srng_access_begin(ab, srng);26962697while (likely(desc =2698(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,2699srng))) {2700cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,2701desc->buf_addr_info.info1);2702buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,2703cookie);2704mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);27052706if (unlikely(buf_id == 0))2707continue;27082709ar = ab->pdevs[mac_id].ar;2710rx_ring = &ar->dp.rx_refill_buf_ring;2711spin_lock_bh(&rx_ring->idr_lock);2712msdu = idr_find(&rx_ring->bufs_idr, buf_id);2713if (unlikely(!msdu)) {2714ath11k_warn(ab, "frame rx with invalid buf_id %d\n",2715buf_id);2716spin_unlock_bh(&rx_ring->idr_lock);2717continue;2718}27192720idr_remove(&rx_ring->bufs_idr, buf_id);2721spin_unlock_bh(&rx_ring->idr_lock);27222723rxcb = ATH11K_SKB_RXCB(msdu);2724dma_unmap_single(ab->dev, rxcb->paddr,2725msdu->len + skb_tailroom(msdu),2726DMA_FROM_DEVICE);27272728num_buffs_reaped[mac_id]++;27292730push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,2731desc->info0);2732if (unlikely(push_reason !=2733HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {2734dev_kfree_skb_any(msdu);2735ab->soc_stats.hal_reo_error[ring_id]++;2736continue;2737}27382739rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &2740RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);2741rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &2742RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);2743rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &2744RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);2745rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,2746desc->rx_mpdu_info.meta_data);2747rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,2748desc->rx_mpdu_info.info0);2749rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,2750desc->info0);27512752rxcb->mac_id = mac_id;2753__skb_queue_tail(&msdu_list[mac_id], msdu);27542755if (rxcb->is_continuation) {2756done = false;2757} else {2758total_msdu_reaped++;2759done = true;2760}27612762if (total_msdu_reaped >= budget)2763break;2764}27652766/* Hw might have updated the head pointer after we cached it.2767* In this case, even though there are 
entries in the ring we'll2768* get rx_desc NULL. Give the read another try with updated cached2769* head pointer so that we can reap complete MPDU in the current2770* rx processing.2771*/2772if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {2773ath11k_hal_srng_access_end(ab, srng);2774goto try_again;2775}27762777ath11k_hal_srng_access_end(ab, srng);27782779spin_unlock_bh(&srng->lock);27802781if (unlikely(!total_msdu_reaped))2782goto exit;27832784for (i = 0; i < ab->num_radios; i++) {2785if (!num_buffs_reaped[i])2786continue;27872788ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);27892790ar = ab->pdevs[i].ar;2791rx_ring = &ar->dp.rx_refill_buf_ring;27922793ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],2794ab->hw_params.hal_params->rx_buf_rbm);2795}2796exit:2797return total_msdu_reaped;2798}27992800static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,2801struct hal_rx_mon_ppdu_info *ppdu_info)2802{2803struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;2804u32 num_msdu;2805int i;28062807if (!rx_stats)2808return;28092810arsta->rssi_comb = ppdu_info->rssi_comb;2811ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);28122813num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +2814ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;28152816rx_stats->num_msdu += num_msdu;2817rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +2818ppdu_info->tcp_ack_msdu_count;2819rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;2820rx_stats->other_msdu_count += ppdu_info->other_msdu_count;28212822if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||2823ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {2824ppdu_info->nss = 1;2825ppdu_info->mcs = HAL_RX_MAX_MCS;2826ppdu_info->tid = IEEE80211_NUM_TIDS;2827}28282829if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)2830rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;28312832if (ppdu_info->mcs <= HAL_RX_MAX_MCS)2833rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;28342835if (ppdu_info->gi < HAL_RX_GI_MAX)2836rx_stats->gi_count[ppdu_info->gi] += num_msdu;28372838if (ppdu_info->bw < HAL_RX_BW_MAX)2839rx_stats->bw_count[ppdu_info->bw] += num_msdu;28402841if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)2842rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;28432844if (ppdu_info->tid <= IEEE80211_NUM_TIDS)2845rx_stats->tid_count[ppdu_info->tid] += num_msdu;28462847if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)2848rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;28492850if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)2851rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;28522853if (ppdu_info->is_stbc)2854rx_stats->stbc_count += num_msdu;28552856if (ppdu_info->beamformed)2857rx_stats->beamformed_count += num_msdu;28582859if (ppdu_info->num_mpdu_fcs_ok > 1)2860rx_stats->ampdu_msdu_count += num_msdu;2861else2862rx_stats->non_ampdu_msdu_count += num_msdu;28632864rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;2865rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;2866rx_stats->dcm_count += ppdu_info->dcm;2867rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;28682869BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >2870ARRAY_SIZE(ppdu_info->rssi_chain_pri20));28712872for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)2873arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];28742875rx_stats->rx_duration += ppdu_info->rx_duration;2876arsta->rx_duration = rx_stats->rx_duration;2877}28782879static struct 
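/* Allocate a monitor status buffer: align skb->data to
 * DP_RX_BUFFER_ALIGN_SIZE, DMA map it and track it in the ring's IDR so
 * the buffer id can be carried in the descriptor cookie.
 */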
sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,2880struct dp_rxdma_ring *rx_ring,2881int *buf_id)2882{2883struct sk_buff *skb;2884dma_addr_t paddr;28852886skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +2887DP_RX_BUFFER_ALIGN_SIZE);28882889if (!skb)2890goto fail_alloc_skb;28912892if (!IS_ALIGNED((unsigned long)skb->data,2893DP_RX_BUFFER_ALIGN_SIZE)) {2894skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -2895skb->data);2896}28972898paddr = dma_map_single(ab->dev, skb->data,2899skb->len + skb_tailroom(skb),2900DMA_FROM_DEVICE);2901if (unlikely(dma_mapping_error(ab->dev, paddr)))2902goto fail_free_skb;29032904spin_lock_bh(&rx_ring->idr_lock);2905*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,2906rx_ring->bufs_max, GFP_ATOMIC);2907spin_unlock_bh(&rx_ring->idr_lock);2908if (*buf_id < 0)2909goto fail_dma_unmap;29102911ATH11K_SKB_RXCB(skb)->paddr = paddr;2912return skb;29132914fail_dma_unmap:2915dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),2916DMA_FROM_DEVICE);2917fail_free_skb:2918dev_kfree_skb_any(skb);2919fail_alloc_skb:2920return NULL;2921}29222923int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,2924struct dp_rxdma_ring *rx_ring,2925int req_entries,2926enum hal_rx_buf_return_buf_manager mgr)2927{2928struct hal_srng *srng;2929u32 *desc;2930struct sk_buff *skb;2931int num_free;2932int num_remain;2933int buf_id;2934u32 cookie;2935dma_addr_t paddr;29362937req_entries = min(req_entries, rx_ring->bufs_max);29382939srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];29402941spin_lock_bh(&srng->lock);29422943ath11k_hal_srng_access_begin(ab, srng);29442945num_free = ath11k_hal_srng_src_num_free(ab, srng, true);29462947req_entries = min(num_free, req_entries);2948num_remain = req_entries;29492950while (num_remain > 0) {2951skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,2952&buf_id);2953if (!skb)2954break;2955paddr = ATH11K_SKB_RXCB(skb)->paddr;29562957desc = ath11k_hal_srng_src_get_next_entry(ab, srng);2958if (!desc)2959goto fail_desc_get;29602961cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |2962FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);29632964num_remain--;29652966ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);2967}29682969ath11k_hal_srng_access_end(ab, srng);29702971spin_unlock_bh(&srng->lock);29722973return req_entries - num_remain;29742975fail_desc_get:2976spin_lock_bh(&rx_ring->idr_lock);2977idr_remove(&rx_ring->bufs_idr, buf_id);2978spin_unlock_bh(&rx_ring->idr_lock);2979dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),2980DMA_FROM_DEVICE);2981dev_kfree_skb_any(skb);2982ath11k_hal_srng_access_end(ab, srng);2983spin_unlock_bh(&srng->lock);29842985return req_entries - num_remain;2986}29872988#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 3253529892990static void2991ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,2992struct hal_tlv_hdr *tlv)2993{2994struct hal_rx_ppdu_start *ppdu_start;2995u16 ppdu_id_diff, ppdu_id, tlv_len;2996u8 *ptr;29972998/* PPDU id is part of second tlv, move ptr to second tlv */2999tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);3000ptr = (u8 *)tlv;3001ptr += sizeof(*tlv) + tlv_len;3002tlv = (struct hal_tlv_hdr *)ptr;30033004if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)3005return;30063007ptr += sizeof(*tlv);3008ppdu_start = (struct hal_rx_ppdu_start *)ptr;3009ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,3010__le32_to_cpu(ppdu_start->info0));30113012if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {3013pmon->buf_state = 
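/* The PPDU id from the status TLV is ahead of the one seen on the
 * destination ring, so the status buffer leads; a difference beyond the
 * wrap threshold means the id wrapped and the buffer in fact lags (the
 * reverse case below is handled the same way).
 */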
DP_MON_STATUS_LEAD;3014ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;3015if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)3016pmon->buf_state = DP_MON_STATUS_LAG;3017} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {3018pmon->buf_state = DP_MON_STATUS_LAG;3019ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;3020if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)3021pmon->buf_state = DP_MON_STATUS_LEAD;3022}3023}30243025static enum dp_mon_status_buf_state3026ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,3027struct dp_rxdma_ring *rx_ring)3028{3029struct ath11k_skb_rxcb *rxcb;3030struct hal_tlv_hdr *tlv;3031struct sk_buff *skb;3032void *status_desc;3033dma_addr_t paddr;3034u32 cookie;3035int buf_id;3036u8 rbm;30373038status_desc = ath11k_hal_srng_src_next_peek(ab, srng);3039if (!status_desc)3040return DP_MON_STATUS_NO_DMA;30413042ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);30433044buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);30453046spin_lock_bh(&rx_ring->idr_lock);3047skb = idr_find(&rx_ring->bufs_idr, buf_id);3048spin_unlock_bh(&rx_ring->idr_lock);30493050if (!skb)3051return DP_MON_STATUS_NO_DMA;30523053rxcb = ATH11K_SKB_RXCB(skb);3054dma_sync_single_for_cpu(ab->dev, rxcb->paddr,3055skb->len + skb_tailroom(skb),3056DMA_FROM_DEVICE);30573058tlv = (struct hal_tlv_hdr *)skb->data;3059if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)3060return DP_MON_STATUS_NO_DMA;30613062return DP_MON_STATUS_REPLINISH;3063}30643065static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,3066int *budget, struct sk_buff_head *skb_list)3067{3068struct ath11k *ar;3069const struct ath11k_hw_hal_params *hal_params;3070enum dp_mon_status_buf_state reap_status;3071struct ath11k_pdev_dp *dp;3072struct dp_rxdma_ring *rx_ring;3073struct ath11k_mon_data *pmon;3074struct hal_srng *srng;3075void *rx_mon_status_desc;3076struct sk_buff *skb;3077struct ath11k_skb_rxcb *rxcb;3078struct hal_tlv_hdr *tlv;3079u32 cookie;3080int buf_id, srng_id;3081dma_addr_t paddr;3082u8 rbm;3083int num_buffs_reaped = 0;30843085ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;3086dp = &ar->dp;3087pmon = &dp->mon_data;3088srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);3089rx_ring = &dp->rx_mon_status_refill_ring[srng_id];30903091srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];30923093spin_lock_bh(&srng->lock);30943095ath11k_hal_srng_access_begin(ab, srng);3096while (*budget) {3097*budget -= 1;3098rx_mon_status_desc =3099ath11k_hal_srng_src_peek(ab, srng);3100if (!rx_mon_status_desc) {3101pmon->buf_state = DP_MON_STATUS_REPLINISH;3102break;3103}31043105ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,3106&cookie, &rbm);3107if (paddr) {3108buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);31093110spin_lock_bh(&rx_ring->idr_lock);3111skb = idr_find(&rx_ring->bufs_idr, buf_id);3112spin_unlock_bh(&rx_ring->idr_lock);31133114if (!skb) {3115ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",3116buf_id);3117pmon->buf_state = DP_MON_STATUS_REPLINISH;3118goto move_next;3119}31203121rxcb = ATH11K_SKB_RXCB(skb);31223123dma_sync_single_for_cpu(ab->dev, rxcb->paddr,3124skb->len + skb_tailroom(skb),3125DMA_FROM_DEVICE);31263127tlv = (struct hal_tlv_hdr *)skb->data;3128if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=3129HAL_RX_STATUS_BUFFER_DONE) {3130ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",3131FIELD_GET(HAL_TLV_HDR_TAG,3132tlv->tl), buf_id);3133/* RxDMA 
status done bit might not be set even3134* though tp is moved by HW.3135*/31363137/* If done status is missing:3138* 1. As per MAC team's suggestion,3139* when HP + 1 entry is peeked and if DMA3140* is not done and if HP + 2 entry's DMA done3141* is set. skip HP + 1 entry and3142* start processing in next interrupt.3143* 2. If HP + 2 entry's DMA done is not set,3144* poll onto HP + 1 entry DMA done to be set.3145* Check status for same buffer for next time3146* dp_rx_mon_status_srng_process3147*/31483149reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,3150rx_ring);3151if (reap_status == DP_MON_STATUS_NO_DMA)3152continue;31533154spin_lock_bh(&rx_ring->idr_lock);3155idr_remove(&rx_ring->bufs_idr, buf_id);3156spin_unlock_bh(&rx_ring->idr_lock);31573158dma_unmap_single(ab->dev, rxcb->paddr,3159skb->len + skb_tailroom(skb),3160DMA_FROM_DEVICE);31613162dev_kfree_skb_any(skb);3163pmon->buf_state = DP_MON_STATUS_REPLINISH;3164goto move_next;3165}31663167spin_lock_bh(&rx_ring->idr_lock);3168idr_remove(&rx_ring->bufs_idr, buf_id);3169spin_unlock_bh(&rx_ring->idr_lock);3170if (ab->hw_params.full_monitor_mode) {3171ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);3172if (paddr == pmon->mon_status_paddr)3173pmon->buf_state = DP_MON_STATUS_MATCH;3174}31753176dma_unmap_single(ab->dev, rxcb->paddr,3177skb->len + skb_tailroom(skb),3178DMA_FROM_DEVICE);31793180__skb_queue_tail(skb_list, skb);3181} else {3182pmon->buf_state = DP_MON_STATUS_REPLINISH;3183}3184move_next:3185skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,3186&buf_id);31873188if (!skb) {3189hal_params = ab->hw_params.hal_params;3190ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,3191hal_params->rx_buf_rbm);3192num_buffs_reaped++;3193break;3194}3195rxcb = ATH11K_SKB_RXCB(skb);31963197cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |3198FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);31993200ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,3201cookie,3202ab->hw_params.hal_params->rx_buf_rbm);3203ath11k_hal_srng_src_get_next_entry(ab, srng);3204num_buffs_reaped++;3205}3206ath11k_hal_srng_access_end(ab, srng);3207spin_unlock_bh(&srng->lock);32083209return num_buffs_reaped;3210}32113212static void ath11k_dp_rx_frag_timer(struct timer_list *timer)3213{3214struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,3215frag_timer);32163217spin_lock_bh(&rx_tid->ab->base_lock);3218if (rx_tid->last_frag_no &&3219rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {3220spin_unlock_bh(&rx_tid->ab->base_lock);3221return;3222}3223ath11k_dp_rx_frags_cleanup(rx_tid, true);3224spin_unlock_bh(&rx_tid->ab->base_lock);3225}32263227int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)3228{3229struct ath11k_base *ab = ar->ab;3230struct crypto_shash *tfm;3231struct ath11k_peer *peer;3232struct dp_rx_tid *rx_tid;3233int i;32343235tfm = crypto_alloc_shash("michael_mic", 0, 0);3236if (IS_ERR(tfm)) {3237ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",3238PTR_ERR(tfm));3239return PTR_ERR(tfm);3240}32413242spin_lock_bh(&ab->base_lock);32433244peer = ath11k_peer_find(ab, vdev_id, peer_mac);3245if (!peer) {3246ath11k_warn(ab, "failed to find the peer to set up fragment info\n");3247spin_unlock_bh(&ab->base_lock);3248crypto_free_shash(tfm);3249return -ENOENT;3250}32513252for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {3253rx_tid = &peer->rx_tid[i];3254rx_tid->ab = ab;3255timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 
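/* Per-TID reassembly timer: if the fragment bitmap never completes,
 * ath11k_dp_rx_frag_timer flushes the stale fragments once
 * ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS expires.
 */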
0);3256skb_queue_head_init(&rx_tid->rx_frags);3257}32583259peer->tfm_mmic = tfm;3260peer->dp_setup_done = true;3261spin_unlock_bh(&ab->base_lock);32623263return 0;3264}32653266static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,3267struct ieee80211_hdr *hdr, u8 *data,3268size_t data_len, u8 *mic)3269{3270SHASH_DESC_ON_STACK(desc, tfm);3271u8 mic_hdr[16] = {};3272u8 tid = 0;3273int ret;32743275if (!tfm)3276return -EINVAL;32773278desc->tfm = tfm;32793280ret = crypto_shash_setkey(tfm, key, 8);3281if (ret)3282goto out;32833284ret = crypto_shash_init(desc);3285if (ret)3286goto out;32873288/* TKIP MIC header */3289memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);3290memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);3291if (ieee80211_is_data_qos(hdr->frame_control))3292tid = ieee80211_get_tid(hdr);3293mic_hdr[12] = tid;32943295ret = crypto_shash_update(desc, mic_hdr, 16);3296if (ret)3297goto out;3298ret = crypto_shash_update(desc, data, data_len);3299if (ret)3300goto out;3301ret = crypto_shash_final(desc, mic);3302out:3303shash_desc_zero(desc);3304return ret;3305}33063307static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,3308struct sk_buff *msdu)3309{3310struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;3311struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);3312struct ieee80211_key_conf *key_conf;3313struct ieee80211_hdr *hdr;3314u8 mic[IEEE80211_CCMP_MIC_LEN];3315int head_len, tail_len, ret;3316size_t data_len;3317u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;3318u8 *key, *data;3319u8 key_idx;33203321if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=3322HAL_ENCRYPT_TYPE_TKIP_MIC)3323return 0;33243325hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);3326hdr_len = ieee80211_hdrlen(hdr->frame_control);3327head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;3328tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;33293330if (!is_multicast_ether_addr(hdr->addr1))3331key_idx = peer->ucast_keyidx;3332else3333key_idx = peer->mcast_keyidx;33343335key_conf = peer->keys[key_idx];33363337data = msdu->data + head_len;3338data_len = msdu->len - head_len - tail_len;3339key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];33403341ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);3342if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))3343goto mic_fail;33443345return 0;33463347mic_fail:3348(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;3349(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;33503351rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |3352RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;3353skb_pull(msdu, hal_rx_desc_sz);33543355ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);3356ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,3357HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);3358ieee80211_rx(ar->hw, msdu);3359return -EINVAL;3360}33613362static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,3363enum hal_encrypt_type enctype, u32 flags)3364{3365struct ieee80211_hdr *hdr;3366size_t hdr_len;3367size_t crypto_len;3368u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;33693370if (!flags)3371return;33723373hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);33743375if (flags & RX_FLAG_MIC_STRIPPED)3376skb_trim(msdu, msdu->len -3377ath11k_dp_rx_crypto_mic_len(ar, enctype));33783379if (flags & RX_FLAG_ICV_STRIPPED)3380skb_trim(msdu, msdu->len -3381ath11k_dp_rx_crypto_icv_len(ar, enctype));33823383if (flags & 
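/* IV reported stripped: slide the 802.11 header over the crypto
 * parameter bytes and pull the same length from the head of the buffer.
 */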
RX_FLAG_IV_STRIPPED) {3384hdr_len = ieee80211_hdrlen(hdr->frame_control);3385crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);33863387#if defined(__linux__)3388memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,3389(void *)msdu->data + hal_rx_desc_sz, hdr_len);3390#elif defined(__FreeBSD__)3391memmove((u8 *)msdu->data + hal_rx_desc_sz + crypto_len,3392(u8 *)msdu->data + hal_rx_desc_sz, hdr_len);3393#endif3394skb_pull(msdu, crypto_len);3395}3396}33973398static int ath11k_dp_rx_h_defrag(struct ath11k *ar,3399struct ath11k_peer *peer,3400struct dp_rx_tid *rx_tid,3401struct sk_buff **defrag_skb)3402{3403struct hal_rx_desc *rx_desc;3404struct sk_buff *skb, *first_frag, *last_frag;3405struct ieee80211_hdr *hdr;3406struct rx_attention *rx_attention;3407enum hal_encrypt_type enctype;3408bool is_decrypted = false;3409int msdu_len = 0;3410int extra_space;3411u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;34123413first_frag = skb_peek(&rx_tid->rx_frags);3414last_frag = skb_peek_tail(&rx_tid->rx_frags);34153416skb_queue_walk(&rx_tid->rx_frags, skb) {3417flags = 0;3418rx_desc = (struct hal_rx_desc *)skb->data;3419hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);34203421enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);3422if (enctype != HAL_ENCRYPT_TYPE_OPEN) {3423rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);3424is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);3425}34263427if (is_decrypted) {3428if (skb != first_frag)3429flags |= RX_FLAG_IV_STRIPPED;3430if (skb != last_frag)3431flags |= RX_FLAG_ICV_STRIPPED |3432RX_FLAG_MIC_STRIPPED;3433}34343435/* RX fragments are always raw packets */3436if (skb != last_frag)3437skb_trim(skb, skb->len - FCS_LEN);3438ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);34393440if (skb != first_frag)3441skb_pull(skb, hal_rx_desc_sz +3442ieee80211_hdrlen(hdr->frame_control));3443msdu_len += skb->len;3444}34453446extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));3447if (extra_space > 0 &&3448(pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))3449return -ENOMEM;34503451__skb_unlink(first_frag, &rx_tid->rx_frags);3452while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {3453skb_put_data(first_frag, skb->data, skb->len);3454dev_kfree_skb_any(skb);3455}34563457hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);3458hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);3459ATH11K_SKB_RXCB(first_frag)->is_frag = 1;34603461if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))3462first_frag = NULL;34633464*defrag_skb = first_frag;3465return 0;3466}34673468static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,3469struct sk_buff *defrag_skb)3470{3471struct ath11k_base *ab = ar->ab;3472struct ath11k_pdev_dp *dp = &ar->dp;3473struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;3474struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;3475struct hal_reo_entrance_ring *reo_ent_ring;3476struct hal_reo_dest_ring *reo_dest_ring;3477struct dp_link_desc_bank *link_desc_banks;3478struct hal_rx_msdu_link *msdu_link;3479struct hal_rx_msdu_details *msdu0;3480struct hal_srng *srng;3481dma_addr_t paddr;3482u32 desc_bank, msdu_info, mpdu_info;3483u32 dst_idx, cookie, hal_rx_desc_sz;3484int ret, buf_id;34853486hal_rx_desc_sz = ab->hw_params.hal_desc_sz;3487link_desc_banks = ab->dp.link_desc_banks;3488reo_dest_ring = rx_tid->dst_ring_desc;34893490ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, 
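/* Recover the original MPDU's link descriptor from its bank so the
 * first MSDU entry can be rewritten to describe the reassembled frame
 * before it is queued back to the REO entrance ring.
 */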
&desc_bank);3491#if defined(__linux__)3492msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +3493#elif defined(__FreeBSD__)3494msdu_link = (struct hal_rx_msdu_link *)((u8 *)link_desc_banks[desc_bank].vaddr +3495#endif3496(paddr - link_desc_banks[desc_bank].paddr));3497msdu0 = &msdu_link->msdu_link[0];3498dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);3499memset(msdu0, 0, sizeof(*msdu0));35003501msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |3502FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |3503FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |3504FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,3505defrag_skb->len - hal_rx_desc_sz) |3506FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |3507FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |3508FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);3509msdu0->rx_msdu_info.info0 = msdu_info;35103511/* change msdu len in hal rx desc */3512ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);35133514paddr = dma_map_single(ab->dev, defrag_skb->data,3515defrag_skb->len + skb_tailroom(defrag_skb),3516DMA_TO_DEVICE);3517if (dma_mapping_error(ab->dev, paddr))3518return -ENOMEM;35193520spin_lock_bh(&rx_refill_ring->idr_lock);3521buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,3522rx_refill_ring->bufs_max * 3, GFP_ATOMIC);3523spin_unlock_bh(&rx_refill_ring->idr_lock);3524if (buf_id < 0) {3525ret = -ENOMEM;3526goto err_unmap_dma;3527}35283529ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;3530cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |3531FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);35323533ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,3534ab->hw_params.hal_params->rx_buf_rbm);35353536/* Fill mpdu details into reo entrance ring */3537srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];35383539spin_lock_bh(&srng->lock);3540ath11k_hal_srng_access_begin(ab, srng);35413542reo_ent_ring = (struct hal_reo_entrance_ring *)3543ath11k_hal_srng_src_get_next_entry(ab, srng);3544if (!reo_ent_ring) {3545ath11k_hal_srng_access_end(ab, srng);3546spin_unlock_bh(&srng->lock);3547ret = -ENOSPC;3548goto err_free_idr;3549}3550memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));35513552ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);3553ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,3554HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);35553556mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |3557FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |3558FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |3559FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |3560FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |3561FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |3562FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);35633564reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;3565reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;3566reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;3567reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,3568FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,3569reo_dest_ring->info0)) |3570FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);3571ath11k_hal_srng_access_end(ab, srng);3572spin_unlock_bh(&srng->lock);35733574return 0;35753576err_free_idr:3577spin_lock_bh(&rx_refill_ring->idr_lock);3578idr_remove(&rx_refill_ring->bufs_idr, buf_id);3579spin_unlock_bh(&rx_refill_ring->idr_lock);3580err_unmap_dma:3581dma_unmap_single(ab->dev, paddr, defrag_skb->len + 
skb_tailroom(defrag_skb),3582DMA_TO_DEVICE);3583return ret;3584}35853586static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,3587struct sk_buff *a, struct sk_buff *b)3588{3589int frag1, frag2;35903591frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);3592frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);35933594return frag1 - frag2;3595}35963597static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,3598struct sk_buff_head *frag_list,3599struct sk_buff *cur_frag)3600{3601struct sk_buff *skb;3602int cmp;36033604skb_queue_walk(frag_list, skb) {3605cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);3606if (cmp < 0)3607continue;3608__skb_queue_before(frag_list, skb, cur_frag);3609return;3610}3611__skb_queue_tail(frag_list, cur_frag);3612}36133614static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)3615{3616struct ieee80211_hdr *hdr;3617u64 pn = 0;3618u8 *ehdr;3619u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;36203621hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);3622ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);36233624pn = ehdr[0];3625pn |= (u64)ehdr[1] << 8;3626pn |= (u64)ehdr[4] << 16;3627pn |= (u64)ehdr[5] << 24;3628pn |= (u64)ehdr[6] << 32;3629pn |= (u64)ehdr[7] << 40;36303631return pn;3632}36333634static bool3635ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)3636{3637enum hal_encrypt_type encrypt_type;3638struct sk_buff *first_frag, *skb;3639struct hal_rx_desc *desc;3640u64 last_pn;3641u64 cur_pn;36423643first_frag = skb_peek(&rx_tid->rx_frags);3644desc = (struct hal_rx_desc *)first_frag->data;36453646encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);3647if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&3648encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&3649encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&3650encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)3651return true;36523653last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);3654skb_queue_walk(&rx_tid->rx_frags, skb) {3655if (skb == first_frag)3656continue;36573658cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);3659if (cur_pn != last_pn + 1)3660return false;3661last_pn = cur_pn;3662}3663return true;3664}36653666static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,3667struct sk_buff *msdu,3668u32 *ring_desc)3669{3670struct ath11k_base *ab = ar->ab;3671struct hal_rx_desc *rx_desc;3672struct ath11k_peer *peer;3673struct dp_rx_tid *rx_tid;3674struct sk_buff *defrag_skb = NULL;3675u32 peer_id;3676u16 seqno, frag_no;3677u8 tid;3678int ret = 0;3679bool more_frags;3680bool is_mcbc;36813682rx_desc = (struct hal_rx_desc *)msdu->data;3683peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);3684tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);3685seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);3686frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);3687more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);3688is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);36893690/* Multicast/Broadcast fragments are not expected */3691if (is_mcbc)3692return -EINVAL;36933694if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||3695!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||3696tid > IEEE80211_NUM_TIDS)3697return -EINVAL;36983699/* received unfragmented packet in reo3700* exception ring, this shouldn't happen3701* as these packets typically come from3702* reo2sw srngs.3703*/3704if (WARN_ON_ONCE(!frag_no && !more_frags))3705return -EINVAL;37063707spin_lock_bh(&ab->base_lock);3708peer = 
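/* Look up the peer under base_lock; its per-TID rx_tid state holds the
 * fragment list, fragment bitmap and the saved destination ring
 * descriptor needed for reinjection once the MPDU is complete.
 */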
ath11k_peer_find_by_id(ab, peer_id);3709if (!peer) {3710ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",3711peer_id);3712ret = -ENOENT;3713goto out_unlock;3714}3715if (!peer->dp_setup_done) {3716ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",3717peer->addr, peer_id);3718ret = -ENOENT;3719goto out_unlock;3720}37213722rx_tid = &peer->rx_tid[tid];37233724if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||3725skb_queue_empty(&rx_tid->rx_frags)) {3726/* Flush stored fragments and start a new sequence */3727ath11k_dp_rx_frags_cleanup(rx_tid, true);3728rx_tid->cur_sn = seqno;3729}37303731if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {3732/* Fragment already present */3733ret = -EINVAL;3734goto out_unlock;3735}37363737if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))3738__skb_queue_tail(&rx_tid->rx_frags, msdu);3739else3740ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);37413742rx_tid->rx_frag_bitmap |= BIT(frag_no);3743if (!more_frags)3744rx_tid->last_frag_no = frag_no;37453746if (frag_no == 0) {3747rx_tid->dst_ring_desc = kmemdup(ring_desc,3748sizeof(*rx_tid->dst_ring_desc),3749GFP_ATOMIC);3750if (!rx_tid->dst_ring_desc) {3751ret = -ENOMEM;3752goto out_unlock;3753}3754} else {3755ath11k_dp_rx_link_desc_return(ab, ring_desc,3756HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);3757}37583759if (!rx_tid->last_frag_no ||3760rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {3761mod_timer(&rx_tid->frag_timer, jiffies +3762ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);3763goto out_unlock;3764}37653766spin_unlock_bh(&ab->base_lock);3767timer_delete_sync(&rx_tid->frag_timer);3768spin_lock_bh(&ab->base_lock);37693770peer = ath11k_peer_find_by_id(ab, peer_id);3771if (!peer)3772goto err_frags_cleanup;37733774if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))3775goto err_frags_cleanup;37763777if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))3778goto err_frags_cleanup;37793780if (!defrag_skb)3781goto err_frags_cleanup;37823783if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))3784goto err_frags_cleanup;37853786ath11k_dp_rx_frags_cleanup(rx_tid, false);3787goto out_unlock;37883789err_frags_cleanup:3790dev_kfree_skb_any(defrag_skb);3791ath11k_dp_rx_frags_cleanup(rx_tid, true);3792out_unlock:3793spin_unlock_bh(&ab->base_lock);3794return ret;3795}37963797static int3798ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)3799{3800struct ath11k_pdev_dp *dp = &ar->dp;3801struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;3802struct sk_buff *msdu;3803struct ath11k_skb_rxcb *rxcb;3804struct hal_rx_desc *rx_desc;3805u8 *hdr_status;3806u16 msdu_len;3807u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;38083809spin_lock_bh(&rx_ring->idr_lock);3810msdu = idr_find(&rx_ring->bufs_idr, buf_id);3811if (!msdu) {3812ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",3813buf_id);3814spin_unlock_bh(&rx_ring->idr_lock);3815return -EINVAL;3816}38173818idr_remove(&rx_ring->bufs_idr, buf_id);3819spin_unlock_bh(&rx_ring->idr_lock);38203821rxcb = ATH11K_SKB_RXCB(msdu);3822dma_unmap_single(ar->ab->dev, rxcb->paddr,3823msdu->len + skb_tailroom(msdu),3824DMA_FROM_DEVICE);38253826if (drop) {3827dev_kfree_skb_any(msdu);3828return 0;3829}38303831rcu_read_lock();3832if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {3833dev_kfree_skb_any(msdu);3834goto exit;3835}38363837if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {3838dev_kfree_skb_any(msdu);3839goto 
exit;3840}38413842rx_desc = (struct hal_rx_desc *)msdu->data;3843msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);3844if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {3845hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);3846ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);3847ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,3848sizeof(struct ieee80211_hdr));3849ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,3850sizeof(struct hal_rx_desc));3851dev_kfree_skb_any(msdu);3852goto exit;3853}38543855skb_put(msdu, hal_rx_desc_sz + msdu_len);38563857if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {3858dev_kfree_skb_any(msdu);3859ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,3860HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);3861}3862exit:3863rcu_read_unlock();3864return 0;3865}38663867int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,3868int budget)3869{3870u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];3871struct dp_link_desc_bank *link_desc_banks;3872enum hal_rx_buf_return_buf_manager rbm;3873int tot_n_bufs_reaped, quota, ret, i;3874int n_bufs_reaped[MAX_RADIOS] = {};3875struct dp_rxdma_ring *rx_ring;3876struct dp_srng *reo_except;3877u32 desc_bank, num_msdus;3878struct hal_srng *srng;3879struct ath11k_dp *dp;3880void *link_desc_va;3881int buf_id, mac_id;3882struct ath11k *ar;3883dma_addr_t paddr;3884u32 *desc;3885bool is_frag;3886u8 drop = 0;38873888tot_n_bufs_reaped = 0;3889quota = budget;38903891dp = &ab->dp;3892reo_except = &dp->reo_except_ring;3893link_desc_banks = dp->link_desc_banks;38943895srng = &ab->hal.srng_list[reo_except->ring_id];38963897spin_lock_bh(&srng->lock);38983899ath11k_hal_srng_access_begin(ab, srng);39003901while (budget &&3902(desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {3903struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;39043905ab->soc_stats.err_ring_pkts++;3906ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,3907&desc_bank);3908if (ret) {3909ath11k_warn(ab, "failed to parse error reo desc %d\n",3910ret);3911continue;3912}3913#if defined(__linux__)3914link_desc_va = link_desc_banks[desc_bank].vaddr +3915#elif defined(__FreeBSD__)3916link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +3917#endif3918(paddr - link_desc_banks[desc_bank].paddr);3919ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,3920&rbm);3921if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&3922rbm != HAL_RX_BUF_RBM_SW1_BM &&3923rbm != HAL_RX_BUF_RBM_SW3_BM) {3924ab->soc_stats.invalid_rbm++;3925ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);3926ath11k_dp_rx_link_desc_return(ab, desc,3927HAL_WBM_REL_BM_ACT_REL_MSDU);3928continue;3929}39303931is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);39323933/* Process only rx fragments with one msdu per link desc below, and drop3934* msdu's indicated due to error reasons.3935*/3936if (!is_frag || num_msdus > 1) {3937drop = 1;3938/* Return the link desc back to wbm idle list */3939ath11k_dp_rx_link_desc_return(ab, desc,3940HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);3941}39423943for (i = 0; i < num_msdus; i++) {3944buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,3945msdu_cookies[i]);39463947mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,3948msdu_cookies[i]);39493950ar = ab->pdevs[mac_id].ar;39513952if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {3953n_bufs_reaped[mac_id]++;3954tot_n_bufs_reaped++;3955}3956}39573958if (tot_n_bufs_reaped >= quota) {3959tot_n_bufs_reaped = quota;3960goto 
exit;3961}39623963budget = quota - tot_n_bufs_reaped;3964}39653966exit:3967ath11k_hal_srng_access_end(ab, srng);39683969spin_unlock_bh(&srng->lock);39703971for (i = 0; i < ab->num_radios; i++) {3972if (!n_bufs_reaped[i])3973continue;39743975ar = ab->pdevs[i].ar;3976rx_ring = &ar->dp.rx_refill_buf_ring;39773978ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],3979ab->hw_params.hal_params->rx_buf_rbm);3980}39813982return tot_n_bufs_reaped;3983}39843985static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,3986int msdu_len,3987struct sk_buff_head *msdu_list)3988{3989struct sk_buff *skb, *tmp;3990struct ath11k_skb_rxcb *rxcb;3991int n_buffs;39923993n_buffs = DIV_ROUND_UP(msdu_len,3994(DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));39953996skb_queue_walk_safe(msdu_list, skb, tmp) {3997rxcb = ATH11K_SKB_RXCB(skb);3998if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&3999rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {4000if (!n_buffs)4001break;4002__skb_unlink(skb, msdu_list);4003dev_kfree_skb_any(skb);4004n_buffs--;4005}4006}4007}40084009static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,4010struct ieee80211_rx_status *status,4011struct sk_buff_head *msdu_list)4012{4013u16 msdu_len;4014struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;4015struct rx_attention *rx_attention;4016u8 l3pad_bytes;4017struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);4018u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;40194020msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);40214022if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {4023/* First buffer will be freed by the caller, so deduct it's length */4024msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);4025ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);4026return -EINVAL;4027}40284029rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);4030if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {4031ath11k_warn(ar->ab,4032"msdu_done bit not set in null_q_des processing\n");4033__skb_queue_purge(msdu_list);4034return -EIO;4035}40364037/* Handle NULL queue descriptor violations arising out a missing4038* REO queue for a given peer or a given TID. This typically4039* may happen if a packet is received on a QOS enabled TID before the4040* ADDBA negotiation for that TID, when the TID queue is setup. Or4041* it may also happen for MC/BC frames if they are not routed to the4042* non-QOS TID queue, in the absence of any other default TID queue.4043* This error can show up both in a REO destination or WBM release ring.4044*/40454046rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);4047rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);40484049if (rxcb->is_frag) {4050skb_pull(msdu, hal_rx_desc_sz);4051} else {4052l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);40534054if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)4055return -EINVAL;40564057skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);4058skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);4059}4060ath11k_dp_rx_h_ppdu(ar, desc, status);40614062ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);40634064rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);40654066/* Please note that caller will having the access to msdu and completing4067* rx with mac80211. 
Need not worry about cleaning up amsdu_list.4068*/40694070return 0;4071}40724073static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,4074struct ieee80211_rx_status *status,4075struct sk_buff_head *msdu_list)4076{4077struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);4078bool drop = false;40794080ar->ab->soc_stats.reo_error[rxcb->err_code]++;40814082switch (rxcb->err_code) {4083case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:4084if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))4085drop = true;4086break;4087case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:4088/* TODO: Do not drop PN failed packets in the driver;4089* instead, it is good to drop such packets in mac802114090* after incrementing the replay counters.4091*/4092fallthrough;4093default:4094/* TODO: Review other errors and process them to mac802114095* as appropriate.4096*/4097drop = true;4098break;4099}41004101return drop;4102}41034104static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,4105struct ieee80211_rx_status *status)4106{4107u16 msdu_len;4108struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;4109u8 l3pad_bytes;4110struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);4111u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;41124113rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);4114rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);41154116l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);4117msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);4118skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);4119skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);41204121ath11k_dp_rx_h_ppdu(ar, desc, status);41224123status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |4124RX_FLAG_DECRYPTED);41254126ath11k_dp_rx_h_undecap(ar, msdu, desc,4127HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);4128}41294130static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,4131struct ieee80211_rx_status *status)4132{4133struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);4134bool drop = false;41354136ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;41374138switch (rxcb->err_code) {4139case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:4140ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);4141break;4142default:4143/* TODO: Review other rxdma error code to check if anything is4144* worth reporting to mac802114145*/4146drop = true;4147break;4148}41494150return drop;4151}41524153static void ath11k_dp_rx_wbm_err(struct ath11k *ar,4154struct napi_struct *napi,4155struct sk_buff *msdu,4156struct sk_buff_head *msdu_list)4157{4158struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);4159struct ieee80211_rx_status rxs = {};4160bool drop = true;41614162switch (rxcb->err_rel_src) {4163case HAL_WBM_REL_SRC_MODULE_REO:4164drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);4165break;4166case HAL_WBM_REL_SRC_MODULE_RXDMA:4167drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);4168break;4169default:4170/* msdu will get freed */4171break;4172}41734174if (drop) {4175dev_kfree_skb_any(msdu);4176return;4177}41784179ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);4180}41814182int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,4183struct napi_struct *napi, int budget)4184{4185struct ath11k *ar;4186struct ath11k_dp *dp = &ab->dp;4187struct dp_rxdma_ring *rx_ring;4188struct hal_rx_wbm_rel_info err_info;4189struct hal_srng *srng;4190struct sk_buff *msdu;4191struct sk_buff_head msdu_list[MAX_RADIOS];4192struct ath11k_skb_rxcb 
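	/* Rough sketch of the cookie layout relied upon below: the sw cookie is
	 * assumed to have been packed at replenish time roughly as
	 *
	 *   cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
	 *            FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
	 *
	 * so FIELD_GET() on those two masks recovers the pdev and the idr buf_id
	 * needed to find the skb in that pdev's refill ring.
	 */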
*rxcb;4193u32 *rx_desc;4194int buf_id, mac_id;4195int num_buffs_reaped[MAX_RADIOS] = {};4196int total_num_buffs_reaped = 0;4197int ret, i;41984199for (i = 0; i < ab->num_radios; i++)4200__skb_queue_head_init(&msdu_list[i]);42014202srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];42034204spin_lock_bh(&srng->lock);42054206ath11k_hal_srng_access_begin(ab, srng);42074208while (budget) {4209rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);4210if (!rx_desc)4211break;42124213ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);4214if (ret) {4215ath11k_warn(ab,4216"failed to parse rx error in wbm_rel ring desc %d\n",4217ret);4218continue;4219}42204221buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);4222mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);42234224ar = ab->pdevs[mac_id].ar;4225rx_ring = &ar->dp.rx_refill_buf_ring;42264227spin_lock_bh(&rx_ring->idr_lock);4228msdu = idr_find(&rx_ring->bufs_idr, buf_id);4229if (!msdu) {4230ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",4231buf_id, mac_id);4232spin_unlock_bh(&rx_ring->idr_lock);4233continue;4234}42354236idr_remove(&rx_ring->bufs_idr, buf_id);4237spin_unlock_bh(&rx_ring->idr_lock);42384239rxcb = ATH11K_SKB_RXCB(msdu);4240dma_unmap_single(ab->dev, rxcb->paddr,4241msdu->len + skb_tailroom(msdu),4242DMA_FROM_DEVICE);42434244num_buffs_reaped[mac_id]++;4245total_num_buffs_reaped++;4246budget--;42474248if (err_info.push_reason !=4249HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {4250dev_kfree_skb_any(msdu);4251continue;4252}42534254rxcb->err_rel_src = err_info.err_rel_src;4255rxcb->err_code = err_info.err_code;4256rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;4257__skb_queue_tail(&msdu_list[mac_id], msdu);4258}42594260ath11k_hal_srng_access_end(ab, srng);42614262spin_unlock_bh(&srng->lock);42634264if (!total_num_buffs_reaped)4265goto done;42664267for (i = 0; i < ab->num_radios; i++) {4268if (!num_buffs_reaped[i])4269continue;42704271ar = ab->pdevs[i].ar;4272rx_ring = &ar->dp.rx_refill_buf_ring;42734274ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],4275ab->hw_params.hal_params->rx_buf_rbm);4276}42774278rcu_read_lock();4279for (i = 0; i < ab->num_radios; i++) {4280if (!rcu_dereference(ab->pdevs_active[i])) {4281__skb_queue_purge(&msdu_list[i]);4282continue;4283}42844285ar = ab->pdevs[i].ar;42864287if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {4288__skb_queue_purge(&msdu_list[i]);4289continue;4290}42914292while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)4293ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);4294}4295rcu_read_unlock();4296done:4297return total_num_buffs_reaped;4298}42994300int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)4301{4302struct ath11k *ar;4303struct dp_srng *err_ring;4304struct dp_rxdma_ring *rx_ring;4305struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;4306struct hal_srng *srng;4307u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];4308enum hal_rx_buf_return_buf_manager rbm;4309enum hal_reo_entr_rxdma_ecode rxdma_err_code;4310struct ath11k_skb_rxcb *rxcb;4311struct sk_buff *skb;4312struct hal_reo_entrance_ring *entr_ring;4313void *desc;4314int num_buf_freed = 0;4315int quota = budget;4316dma_addr_t paddr;4317u32 desc_bank;4318void *link_desc_va;4319int num_msdus;4320int i;4321int buf_id;43224323ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;4324err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,4325mac_id)];4326rx_ring = 
&ar->dp.rx_refill_buf_ring;43274328srng = &ab->hal.srng_list[err_ring->ring_id];43294330spin_lock_bh(&srng->lock);43314332ath11k_hal_srng_access_begin(ab, srng);43334334while (quota-- &&4335(desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {4336ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);43374338entr_ring = (struct hal_reo_entrance_ring *)desc;4339rxdma_err_code =4340FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,4341entr_ring->info1);4342ab->soc_stats.rxdma_error[rxdma_err_code]++;43434344#if defined(__linux__)4345link_desc_va = link_desc_banks[desc_bank].vaddr +4346#elif defined(__FreeBSD__)4347link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +4348#endif4349(paddr - link_desc_banks[desc_bank].paddr);4350ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,4351msdu_cookies, &rbm);43524353for (i = 0; i < num_msdus; i++) {4354buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,4355msdu_cookies[i]);43564357spin_lock_bh(&rx_ring->idr_lock);4358skb = idr_find(&rx_ring->bufs_idr, buf_id);4359if (!skb) {4360ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",4361buf_id);4362spin_unlock_bh(&rx_ring->idr_lock);4363continue;4364}43654366idr_remove(&rx_ring->bufs_idr, buf_id);4367spin_unlock_bh(&rx_ring->idr_lock);43684369rxcb = ATH11K_SKB_RXCB(skb);4370dma_unmap_single(ab->dev, rxcb->paddr,4371skb->len + skb_tailroom(skb),4372DMA_FROM_DEVICE);4373dev_kfree_skb_any(skb);43744375num_buf_freed++;4376}43774378ath11k_dp_rx_link_desc_return(ab, desc,4379HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);4380}43814382ath11k_hal_srng_access_end(ab, srng);43834384spin_unlock_bh(&srng->lock);43854386if (num_buf_freed)4387ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,4388ab->hw_params.hal_params->rx_buf_rbm);43894390return budget - quota;4391}43924393void ath11k_dp_process_reo_status(struct ath11k_base *ab)4394{4395struct ath11k_dp *dp = &ab->dp;4396struct hal_srng *srng;4397struct dp_reo_cmd *cmd, *tmp;4398bool found = false;4399u32 *reo_desc;4400u16 tag;4401struct hal_reo_status reo_status;44024403srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];44044405memset(&reo_status, 0, sizeof(reo_status));44064407spin_lock_bh(&srng->lock);44084409ath11k_hal_srng_access_begin(ab, srng);44104411while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {4412tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);44134414switch (tag) {4415case HAL_REO_GET_QUEUE_STATS_STATUS:4416ath11k_hal_reo_status_queue_stats(ab, reo_desc,4417&reo_status);4418break;4419case HAL_REO_FLUSH_QUEUE_STATUS:4420ath11k_hal_reo_flush_queue_status(ab, reo_desc,4421&reo_status);4422break;4423case HAL_REO_FLUSH_CACHE_STATUS:4424ath11k_hal_reo_flush_cache_status(ab, reo_desc,4425&reo_status);4426break;4427case HAL_REO_UNBLOCK_CACHE_STATUS:4428ath11k_hal_reo_unblk_cache_status(ab, reo_desc,4429&reo_status);4430break;4431case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:4432ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,4433&reo_status);4434break;4435case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:4436ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,4437&reo_status);4438break;4439case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:4440ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,4441&reo_status);4442break;4443default:4444ath11k_warn(ab, "Unknown reo status type %d\n", tag);4445continue;4446}44474448spin_lock_bh(&dp->reo_cmd_lock);4449list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {4450if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {4451found = 
true;4452list_del(&cmd->list);4453break;4454}4455}4456spin_unlock_bh(&dp->reo_cmd_lock);44574458if (found) {4459cmd->handler(dp, (void *)&cmd->data,4460reo_status.uniform_hdr.cmd_status);4461kfree(cmd);4462}44634464found = false;4465}44664467ath11k_hal_srng_access_end(ab, srng);44684469spin_unlock_bh(&srng->lock);4470}44714472void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)4473{4474struct ath11k *ar = ab->pdevs[mac_id].ar;44754476ath11k_dp_rx_pdev_srng_free(ar);4477ath11k_dp_rxdma_pdev_buf_free(ar);4478}44794480int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)4481{4482struct ath11k *ar = ab->pdevs[mac_id].ar;4483struct ath11k_pdev_dp *dp = &ar->dp;4484u32 ring_id;4485int i;4486int ret;44874488ret = ath11k_dp_rx_pdev_srng_alloc(ar);4489if (ret) {4490ath11k_warn(ab, "failed to setup rx srngs\n");4491return ret;4492}44934494ret = ath11k_dp_rxdma_pdev_buf_setup(ar);4495if (ret) {4496ath11k_warn(ab, "failed to setup rxdma ring\n");4497return ret;4498}44994500ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;4501ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);4502if (ret) {4503ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",4504ret);4505return ret;4506}45074508if (ab->hw_params.rx_mac_buf_ring) {4509for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {4510ring_id = dp->rx_mac_buf_ring[i].ring_id;4511ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,4512mac_id + i, HAL_RXDMA_BUF);4513if (ret) {4514ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",4515i, ret);4516return ret;4517}4518}4519}45204521for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {4522ring_id = dp->rxdma_err_dst_ring[i].ring_id;4523ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,4524mac_id + i, HAL_RXDMA_DST);4525if (ret) {4526ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",4527i, ret);4528return ret;4529}4530}45314532if (!ab->hw_params.rxdma1_enable)4533goto config_refill_ring;45344535ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;4536ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,4537mac_id, HAL_RXDMA_MONITOR_BUF);4538if (ret) {4539ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",4540ret);4541return ret;4542}4543ret = ath11k_dp_tx_htt_srng_setup(ab,4544dp->rxdma_mon_dst_ring.ring_id,4545mac_id, HAL_RXDMA_MONITOR_DST);4546if (ret) {4547ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",4548ret);4549return ret;4550}4551ret = ath11k_dp_tx_htt_srng_setup(ab,4552dp->rxdma_mon_desc_ring.ring_id,4553mac_id, HAL_RXDMA_MONITOR_DESC);4554if (ret) {4555ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",4556ret);4557return ret;4558}45594560config_refill_ring:4561for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {4562ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;4563ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,4564HAL_RXDMA_MONITOR_STATUS);4565if (ret) {4566ath11k_warn(ab,4567"failed to configure mon_status_refill_ring%d %d\n",4568i, ret);4569return ret;4570}4571}45724573return 0;4574}45754576static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)4577{4578if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {4579*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);4580*total_len -= *frag_len;4581} else {4582*frag_len = *total_len;4583*total_len = 0;4584}4585}45864587static4588int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,4589void *p_last_buf_addr_info,4590u8 mac_id)4591{4592struct ath11k_pdev_dp *dp = 
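	/* In short, this is the monitor-ring variant of returning a link
	 * descriptor: the previous buffer address info is copied into the next
	 * free entry of either the per-pdev rxdma_mon_desc_ring (when rxdma1 is
	 * available) or the common wbm_desc_rel_ring, presumably so the
	 * hardware can recycle that link descriptor; -ENOMEM simply means the
	 * ring is full.
	 */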
&ar->dp;4593struct dp_srng *dp_srng;4594void *hal_srng;4595void *src_srng_desc;4596int ret = 0;45974598if (ar->ab->hw_params.rxdma1_enable) {4599dp_srng = &dp->rxdma_mon_desc_ring;4600hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];4601} else {4602dp_srng = &ar->ab->dp.wbm_desc_rel_ring;4603hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];4604}46054606ath11k_hal_srng_access_begin(ar->ab, hal_srng);46074608src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);46094610if (src_srng_desc) {4611struct ath11k_buffer_addr *src_desc = src_srng_desc;46124613*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);4614} else {4615ath11k_dbg(ar->ab, ATH11K_DBG_DATA,4616"Monitor Link Desc Ring %d Full", mac_id);4617ret = -ENOMEM;4618}46194620ath11k_hal_srng_access_end(ar->ab, hal_srng);4621return ret;4622}46234624static4625void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,4626dma_addr_t *paddr, u32 *sw_cookie,4627u8 *rbm,4628void **pp_buf_addr_info)4629{4630struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;4631struct ath11k_buffer_addr *buf_addr_info;46324633buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;46344635ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);46364637*pp_buf_addr_info = (void *)buf_addr_info;4638}46394640static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)4641{4642if (skb->len > len) {4643skb_trim(skb, len);4644} else {4645if (skb_tailroom(skb) < len - skb->len) {4646if ((pskb_expand_head(skb, 0,4647len - skb->len - skb_tailroom(skb),4648GFP_ATOMIC))) {4649dev_kfree_skb_any(skb);4650return -ENOMEM;4651}4652}4653skb_put(skb, (len - skb->len));4654}4655return 0;4656}46574658static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,4659void *msdu_link_desc,4660struct hal_rx_msdu_list *msdu_list,4661u16 *num_msdus)4662{4663struct hal_rx_msdu_details *msdu_details = NULL;4664struct rx_msdu_desc *msdu_desc_info = NULL;4665struct hal_rx_msdu_link *msdu_link = NULL;4666int i;4667u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);4668u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);4669u8 tmp = 0;46704671msdu_link = msdu_link_desc;4672msdu_details = &msdu_link->msdu_link[0];46734674for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {4675if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,4676msdu_details[i].buf_addr_info.info0) == 0) {4677msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;4678msdu_desc_info->info0 |= last;4679break;4680}4681msdu_desc_info = &msdu_details[i].rx_msdu_info;46824683if (!i)4684msdu_desc_info->info0 |= first;4685else if (i == (HAL_RX_NUM_MSDU_DESC - 1))4686msdu_desc_info->info0 |= last;4687msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;4688msdu_list->msdu_info[i].msdu_len =4689HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);4690msdu_list->sw_cookie[i] =4691FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,4692msdu_details[i].buf_addr_info.info1);4693tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,4694msdu_details[i].buf_addr_info.info1);4695msdu_list->rbm[i] = tmp;4696}4697*num_msdus = i;4698}46994700static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,4701u32 *rx_bufs_used)4702{4703u32 ret = 0;47044705if ((*ppdu_id < msdu_ppdu_id) &&4706((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {4707*ppdu_id = msdu_ppdu_id;4708ret = msdu_ppdu_id;4709} else if ((*ppdu_id > msdu_ppdu_id) &&4710((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {4711/* mon_dst is behind than mon_status4712* skip dst_ring and free 
it4713*/4714*rx_bufs_used += 1;4715*ppdu_id = msdu_ppdu_id;4716ret = msdu_ppdu_id;4717}4718return ret;4719}47204721static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,4722bool *is_frag, u32 *total_len,4723u32 *frag_len, u32 *msdu_cnt)4724{4725if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {4726if (!*is_frag) {4727*total_len = info->msdu_len;4728*is_frag = true;4729}4730ath11k_dp_mon_set_frag_len(total_len,4731frag_len);4732} else {4733if (*is_frag) {4734ath11k_dp_mon_set_frag_len(total_len,4735frag_len);4736} else {4737*frag_len = info->msdu_len;4738}4739*is_frag = false;4740*msdu_cnt -= 1;4741}4742}47434744/* clang stack usage explodes if this is inlined */4745static noinline_for_stack4746u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,4747void *ring_entry, struct sk_buff **head_msdu,4748struct sk_buff **tail_msdu, u32 *npackets,4749u32 *ppdu_id)4750{4751struct ath11k_pdev_dp *dp = &ar->dp;4752struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;4753struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;4754struct sk_buff *msdu = NULL, *last = NULL;4755struct hal_rx_msdu_list msdu_list;4756void *p_buf_addr_info, *p_last_buf_addr_info;4757struct hal_rx_desc *rx_desc;4758void *rx_msdu_link_desc;4759dma_addr_t paddr;4760u16 num_msdus = 0;4761u32 rx_buf_size, rx_pkt_offset, sw_cookie;4762u32 rx_bufs_used = 0, i = 0;4763u32 msdu_ppdu_id = 0, msdu_cnt = 0;4764u32 total_len = 0, frag_len = 0;4765bool is_frag, is_first_msdu;4766bool drop_mpdu = false;4767struct ath11k_skb_rxcb *rxcb;4768struct hal_reo_entrance_ring *ent_desc = ring_entry;4769int buf_id;4770u32 rx_link_buf_info[2];4771u8 rbm;47724773if (!ar->ab->hw_params.rxdma1_enable)4774rx_ring = &dp->rx_refill_buf_ring;47754776ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,4777&sw_cookie,4778&p_last_buf_addr_info, &rbm,4779&msdu_cnt);47804781if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,4782ent_desc->info1) ==4783HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {4784u8 rxdma_err =4785FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,4786ent_desc->info1);4787if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||4788rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||4789rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {4790drop_mpdu = true;4791pmon->rx_mon_stats.dest_mpdu_drop++;4792}4793}47944795is_frag = false;4796is_first_msdu = true;47974798do {4799if (pmon->mon_last_linkdesc_paddr == paddr) {4800pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;4801return rx_bufs_used;4802}48034804if (ar->ab->hw_params.rxdma1_enable)4805rx_msdu_link_desc =4806#if defined(__linux__)4807(void *)pmon->link_desc_banks[sw_cookie].vaddr +4808#elif defined(__FreeBSD__)4809(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +4810#endif4811(paddr - pmon->link_desc_banks[sw_cookie].paddr);4812else4813rx_msdu_link_desc =4814#if defined(__linux__)4815(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +4816#elif defined(__FreeBSD__)4817(u8 *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +4818#endif4819(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);48204821ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,4822&num_msdus);48234824for (i = 0; i < num_msdus; i++) {4825u32 l2_hdr_offset;48264827if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {4828ath11k_dbg(ar->ab, ATH11K_DBG_DATA,4829"i %d last_cookie %d is same\n",4830i, pmon->mon_last_buf_cookie);4831drop_mpdu = true;4832pmon->rx_mon_stats.dup_mon_buf_cnt++;4833continue;4834}4835buf_id = 
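			/* Sizing sketch (figures illustrative, assuming the usual
			 * 2048-byte DP_RX_BUFFER_SIZE): an MSDU with the
			 * MSDU_CONTINUATION flag set is spread over several rx
			 * buffers, each contributing at most
			 * DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc) payload
			 * bytes, so e.g. a ~3000-byte MSDU spans two buffers. That
			 * per-buffer share is what ath11k_dp_mon_get_buf_len()
			 * returns in frag_len further below.
			 */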
FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,4836msdu_list.sw_cookie[i]);48374838spin_lock_bh(&rx_ring->idr_lock);4839msdu = idr_find(&rx_ring->bufs_idr, buf_id);4840spin_unlock_bh(&rx_ring->idr_lock);4841if (!msdu) {4842ath11k_dbg(ar->ab, ATH11K_DBG_DATA,4843"msdu_pop: invalid buf_id %d\n", buf_id);4844goto next_msdu;4845}4846rxcb = ATH11K_SKB_RXCB(msdu);4847if (!rxcb->unmapped) {4848dma_unmap_single(ar->ab->dev, rxcb->paddr,4849msdu->len +4850skb_tailroom(msdu),4851DMA_FROM_DEVICE);4852rxcb->unmapped = 1;4853}4854if (drop_mpdu) {4855ath11k_dbg(ar->ab, ATH11K_DBG_DATA,4856"i %d drop msdu %p *ppdu_id %x\n",4857i, msdu, *ppdu_id);4858dev_kfree_skb_any(msdu);4859msdu = NULL;4860goto next_msdu;4861}48624863rx_desc = (struct hal_rx_desc *)msdu->data;48644865rx_pkt_offset = sizeof(struct hal_rx_desc);4866l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);48674868if (is_first_msdu) {4869if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {4870drop_mpdu = true;4871dev_kfree_skb_any(msdu);4872msdu = NULL;4873pmon->mon_last_linkdesc_paddr = paddr;4874goto next_msdu;4875}48764877msdu_ppdu_id =4878ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);48794880if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,4881ppdu_id,4882&rx_bufs_used)) {4883if (rx_bufs_used) {4884drop_mpdu = true;4885dev_kfree_skb_any(msdu);4886msdu = NULL;4887goto next_msdu;4888}4889return rx_bufs_used;4890}4891pmon->mon_last_linkdesc_paddr = paddr;4892is_first_msdu = false;4893}4894ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],4895&is_frag, &total_len,4896&frag_len, &msdu_cnt);4897rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;48984899ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);49004901if (!(*head_msdu))4902*head_msdu = msdu;4903else if (last)4904last->next = msdu;49054906last = msdu;4907next_msdu:4908pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];4909rx_bufs_used++;4910spin_lock_bh(&rx_ring->idr_lock);4911idr_remove(&rx_ring->bufs_idr, buf_id);4912spin_unlock_bh(&rx_ring->idr_lock);4913}49144915ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);49164917ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,4918&sw_cookie, &rbm,4919&p_buf_addr_info);49204921if (ar->ab->hw_params.rxdma1_enable) {4922if (ath11k_dp_rx_monitor_link_desc_return(ar,4923p_last_buf_addr_info,4924dp->mac_id))4925ath11k_dbg(ar->ab, ATH11K_DBG_DATA,4926"dp_rx_monitor_link_desc_return failed");4927} else {4928ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,4929HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);4930}49314932p_last_buf_addr_info = p_buf_addr_info;49334934} while (paddr && msdu_cnt);49354936if (last)4937last->next = NULL;49384939*tail_msdu = msdu;49404941if (msdu_cnt == 0)4942*npackets = 1;49434944return rx_bufs_used;4945}49464947static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)4948{4949u32 rx_pkt_offset, l2_hdr_offset;49504951rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;4952l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,4953(struct hal_rx_desc *)msdu->data);4954skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);4955}49564957static struct sk_buff *4958ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,4959u32 mac_id, struct sk_buff *head_msdu,4960struct sk_buff *last_msdu,4961struct ieee80211_rx_status *rxs, bool *fcs_err)4962{4963struct ath11k_base *ab = ar->ab;4964struct sk_buff *msdu, *prev_buf;4965struct hal_rx_desc *rx_desc;4966char *hdr_desc;4967u8 *dest, decap_format;4968struct ieee80211_hdr_3addr *wh;4969struct rx_attention *rx_attention;4970u32 err_bitmap;49714972if 
(!head_msdu)4973goto err_merge_fail;49744975rx_desc = (struct hal_rx_desc *)head_msdu->data;4976rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);4977err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);49784979if (err_bitmap & DP_RX_MPDU_ERR_FCS)4980*fcs_err = true;49814982if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))4983return NULL;49844985decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);49864987ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);49884989if (decap_format == DP_RX_DECAP_TYPE_RAW) {4990ath11k_dp_rx_msdus_set_payload(ar, head_msdu);49914992prev_buf = head_msdu;4993msdu = head_msdu->next;49944995while (msdu) {4996ath11k_dp_rx_msdus_set_payload(ar, msdu);49974998prev_buf = msdu;4999msdu = msdu->next;5000}50015002prev_buf->next = NULL;50035004skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);5005} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {5006u8 qos_pkt = 0;50075008rx_desc = (struct hal_rx_desc *)head_msdu->data;5009hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);50105011/* Base size */5012wh = (struct ieee80211_hdr_3addr *)hdr_desc;50135014if (ieee80211_is_data_qos(wh->frame_control))5015qos_pkt = 1;50165017msdu = head_msdu;50185019while (msdu) {5020ath11k_dp_rx_msdus_set_payload(ar, msdu);5021if (qos_pkt) {5022dest = skb_push(msdu, sizeof(__le16));5023if (!dest)5024goto err_merge_fail;5025memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));5026}5027prev_buf = msdu;5028msdu = msdu->next;5029}5030dest = skb_put(prev_buf, HAL_RX_FCS_LEN);5031if (!dest)5032goto err_merge_fail;50335034ath11k_dbg(ab, ATH11K_DBG_DATA,5035"mpdu_buf %p mpdu_buf->len %u",5036prev_buf, prev_buf->len);5037} else {5038ath11k_dbg(ab, ATH11K_DBG_DATA,5039"decap format %d is not supported!\n",5040decap_format);5041goto err_merge_fail;5042}50435044return head_msdu;50455046err_merge_fail:5047return NULL;5048}50495050static void5051ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,5052u8 *rtap_buf)5053{5054u32 rtap_len = 0;50555056put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);5057rtap_len += 2;50585059put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);5060rtap_len += 2;50615062put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);5063rtap_len += 2;50645065put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);5066rtap_len += 2;50675068put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);5069rtap_len += 2;50705071put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);5072}50735074static void5075ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,5076u8 *rtap_buf)5077{5078u32 rtap_len = 0;50795080put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);5081rtap_len += 2;50825083put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);5084rtap_len += 2;50855086rtap_buf[rtap_len] = rx_status->he_RU[0];5087rtap_len += 1;50885089rtap_buf[rtap_len] = rx_status->he_RU[1];5090rtap_len += 1;50915092rtap_buf[rtap_len] = rx_status->he_RU[2];5093rtap_len += 1;50945095rtap_buf[rtap_len] = rx_status->he_RU[3];5096}50975098static void ath11k_update_radiotap(struct ath11k *ar,5099struct hal_rx_mon_ppdu_info *ppduinfo,5100struct sk_buff *mon_skb,5101struct ieee80211_rx_status *rxs)5102{5103struct ieee80211_supported_band *sband;5104u8 *ptr = NULL;51055106rxs->flag |= RX_FLAG_MACTIME_START;5107rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;51085109if (ppduinfo->nss)5110rxs->nss = ppduinfo->nss;51115112if (ppduinfo->he_mu_flags) {5113rxs->flag |= 
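		/* Radiotap contract assumed here: when RX_FLAG_RADIOTAP_HE(_MU) is
		 * set, mac80211 expects the frame data to begin with a struct
		 * ieee80211_radiotap_he (or ieee80211_radiotap_he_mu). The
		 * skb_push() below reserves exactly that much headroom and the
		 * update helpers above fill it field by field (see the
		 * put_unaligned_le16() calls).
		 */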
RX_FLAG_RADIOTAP_HE_MU;5114rxs->encoding = RX_ENC_HE;5115ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));5116ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);5117} else if (ppduinfo->he_flags) {5118rxs->flag |= RX_FLAG_RADIOTAP_HE;5119rxs->encoding = RX_ENC_HE;5120ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));5121ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);5122rxs->rate_idx = ppduinfo->rate;5123} else if (ppduinfo->vht_flags) {5124rxs->encoding = RX_ENC_VHT;5125rxs->rate_idx = ppduinfo->rate;5126} else if (ppduinfo->ht_flags) {5127rxs->encoding = RX_ENC_HT;5128rxs->rate_idx = ppduinfo->rate;5129} else {5130rxs->encoding = RX_ENC_LEGACY;5131sband = &ar->mac.sbands[rxs->band];5132rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,5133ppduinfo->cck_flag);5134}51355136rxs->mactime = ppduinfo->tsft;5137}51385139static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,5140struct sk_buff *head_msdu,5141struct hal_rx_mon_ppdu_info *ppduinfo,5142struct sk_buff *tail_msdu,5143struct napi_struct *napi)5144{5145struct ath11k_pdev_dp *dp = &ar->dp;5146struct sk_buff *mon_skb, *skb_next, *header;5147struct ieee80211_rx_status *rxs = &dp->rx_status;5148bool fcs_err = false;51495150mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,5151tail_msdu, rxs, &fcs_err);51525153if (!mon_skb)5154goto mon_deliver_fail;51555156header = mon_skb;51575158rxs->flag = 0;51595160if (fcs_err)5161rxs->flag = RX_FLAG_FAILED_FCS_CRC;51625163do {5164skb_next = mon_skb->next;5165if (!skb_next)5166rxs->flag &= ~RX_FLAG_AMSDU_MORE;5167else5168rxs->flag |= RX_FLAG_AMSDU_MORE;51695170if (mon_skb == header) {5171header = NULL;5172rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;5173} else {5174rxs->flag |= RX_FLAG_ALLOW_SAME_PN;5175}5176rxs->flag |= RX_FLAG_ONLY_MONITOR;5177ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);51785179ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);5180mon_skb = skb_next;5181} while (mon_skb);5182rxs->flag = 0;51835184return 0;51855186mon_deliver_fail:5187mon_skb = head_msdu;5188while (mon_skb) {5189skb_next = mon_skb->next;5190dev_kfree_skb_any(mon_skb);5191mon_skb = skb_next;5192}5193return -EINVAL;5194}51955196/* The destination ring processing is stuck if the destination is not5197* moving while status ring moves 16 PPDU. 
The destination ring processing5198* skips this destination ring PPDU as a workaround.5199*/5200#define MON_DEST_RING_STUCK_MAX_CNT 1652015202static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,5203u32 quota, struct napi_struct *napi)5204{5205struct ath11k_pdev_dp *dp = &ar->dp;5206struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;5207const struct ath11k_hw_hal_params *hal_params;5208void *ring_entry;5209struct hal_srng *mon_dst_srng;5210u32 ppdu_id;5211u32 rx_bufs_used;5212u32 ring_id;5213struct ath11k_pdev_mon_stats *rx_mon_stats;5214u32 npackets = 0;5215u32 mpdu_rx_bufs_used;52165217if (ar->ab->hw_params.rxdma1_enable)5218ring_id = dp->rxdma_mon_dst_ring.ring_id;5219else5220ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;52215222mon_dst_srng = &ar->ab->hal.srng_list[ring_id];52235224spin_lock_bh(&pmon->mon_lock);52255226spin_lock_bh(&mon_dst_srng->lock);5227ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);52285229ppdu_id = pmon->mon_ppdu_info.ppdu_id;5230rx_bufs_used = 0;5231rx_mon_stats = &pmon->rx_mon_stats;52325233while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {5234struct sk_buff *head_msdu, *tail_msdu;52355236head_msdu = NULL;5237tail_msdu = NULL;52385239mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,5240&head_msdu,5241&tail_msdu,5242&npackets, &ppdu_id);52435244rx_bufs_used += mpdu_rx_bufs_used;52455246if (mpdu_rx_bufs_used) {5247dp->mon_dest_ring_stuck_cnt = 0;5248} else {5249dp->mon_dest_ring_stuck_cnt++;5250rx_mon_stats->dest_mon_not_reaped++;5251}52525253if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {5254rx_mon_stats->dest_mon_stuck++;5255ath11k_dbg(ar->ab, ATH11K_DBG_DATA,5256"status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",5257pmon->mon_ppdu_info.ppdu_id, ppdu_id,5258dp->mon_dest_ring_stuck_cnt,5259rx_mon_stats->dest_mon_not_reaped,5260rx_mon_stats->dest_mon_stuck);5261pmon->mon_ppdu_info.ppdu_id = ppdu_id;5262continue;5263}52645265if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {5266pmon->mon_ppdu_status = DP_PPDU_STATUS_START;5267ath11k_dbg(ar->ab, ATH11K_DBG_DATA,5268"dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",5269ppdu_id, pmon->mon_ppdu_info.ppdu_id,5270rx_mon_stats->dest_mon_not_reaped,5271rx_mon_stats->dest_mon_stuck);5272break;5273}5274if (head_msdu && tail_msdu) {5275ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,5276&pmon->mon_ppdu_info,5277tail_msdu, napi);5278rx_mon_stats->dest_mpdu_done++;5279}52805281ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,5282mon_dst_srng);5283}5284ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);5285spin_unlock_bh(&mon_dst_srng->lock);52865287spin_unlock_bh(&pmon->mon_lock);52885289if (rx_bufs_used) {5290rx_mon_stats->dest_ppdu_done++;5291hal_params = ar->ab->hw_params.hal_params;52925293if (ar->ab->hw_params.rxdma1_enable)5294ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,5295&dp->rxdma_mon_buf_ring,5296rx_bufs_used,5297hal_params->rx_buf_rbm);5298else5299ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,5300&dp->rx_refill_buf_ring,5301rx_bufs_used,5302hal_params->rx_buf_rbm);5303}5304}53055306int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,5307struct napi_struct *napi, int budget)5308{5309struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);5310enum hal_rx_mon_status hal_status;5311struct sk_buff *skb;5312struct sk_buff_head skb_list;5313struct ath11k_peer *peer;5314struct ath11k_sta 
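	/* Rough shape of the status-ring state machine driven below: each reaped
	 * status buffer is parsed TLV by TLV into ppdu_info; once a PPDU's TLVs
	 * are complete (HAL_TLV_STATUS_PPDU_DONE) while the pdev is in
	 * DP_PPDU_STATUS_START, the state flips to DP_PPDU_STATUS_DONE, the
	 * destination ring is processed for that PPDU (non full-monitor case),
	 * and the state returns to DP_PPDU_STATUS_START for the next PPDU.
	 */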
*arsta;5315int num_buffs_reaped = 0;5316u32 rx_buf_sz;5317u16 log_type;5318struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;5319struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;5320struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;53215322__skb_queue_head_init(&skb_list);53235324num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,5325&skb_list);5326if (!num_buffs_reaped)5327goto exit;53285329memset(ppdu_info, 0, sizeof(*ppdu_info));5330ppdu_info->peer_id = HAL_INVALID_PEERID;53315332while ((skb = __skb_dequeue(&skb_list))) {5333if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {5334log_type = ATH11K_PKTLOG_TYPE_LITE_RX;5335rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;5336} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {5337log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;5338rx_buf_sz = DP_RX_BUFFER_SIZE;5339} else {5340log_type = ATH11K_PKTLOG_TYPE_INVALID;5341rx_buf_sz = 0;5342}53435344if (log_type != ATH11K_PKTLOG_TYPE_INVALID)5345trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);53465347memset(ppdu_info, 0, sizeof(*ppdu_info));5348ppdu_info->peer_id = HAL_INVALID_PEERID;5349hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);53505351if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&5352pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&5353hal_status == HAL_TLV_STATUS_PPDU_DONE) {5354rx_mon_stats->status_ppdu_done++;5355pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;5356if (!ab->hw_params.full_monitor_mode) {5357ath11k_dp_rx_mon_dest_process(ar, mac_id,5358budget, napi);5359pmon->mon_ppdu_status = DP_PPDU_STATUS_START;5360}5361}53625363if (ppdu_info->peer_id == HAL_INVALID_PEERID ||5364hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {5365dev_kfree_skb_any(skb);5366continue;5367}53685369rcu_read_lock();5370spin_lock_bh(&ab->base_lock);5371peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);53725373if (!peer || !peer->sta) {5374ath11k_dbg(ab, ATH11K_DBG_DATA,5375"failed to find the peer with peer_id %d\n",5376ppdu_info->peer_id);5377goto next_skb;5378}53795380arsta = ath11k_sta_to_arsta(peer->sta);5381ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);53825383if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))5384trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);53855386next_skb:5387spin_unlock_bh(&ab->base_lock);5388rcu_read_unlock();53895390dev_kfree_skb_any(skb);5391memset(ppdu_info, 0, sizeof(*ppdu_info));5392ppdu_info->peer_id = HAL_INVALID_PEERID;5393}5394exit:5395return num_buffs_reaped;5396}53975398static u325399ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,5400void *ring_entry, struct sk_buff **head_msdu,5401struct sk_buff **tail_msdu,5402struct hal_sw_mon_ring_entries *sw_mon_entries)5403{5404struct ath11k_pdev_dp *dp = &ar->dp;5405struct ath11k_mon_data *pmon = &dp->mon_data;5406struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;5407struct sk_buff *msdu = NULL, *last = NULL;5408struct hal_sw_monitor_ring *sw_desc = ring_entry;5409struct hal_rx_msdu_list msdu_list;5410struct hal_rx_desc *rx_desc;5411struct ath11k_skb_rxcb *rxcb;5412void *rx_msdu_link_desc;5413void *p_buf_addr_info, *p_last_buf_addr_info;5414int buf_id, i = 0;5415u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;5416u32 rx_bufs_used = 0, msdu_cnt = 0;5417u32 total_len = 0, frag_len = 0, sw_cookie;5418u16 num_msdus = 0;5419u8 rxdma_err, rbm;5420bool is_frag, is_first_msdu;5421bool drop_mpdu = false;54225423ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);54245425sw_cookie = 
sw_mon_entries->mon_dst_sw_cookie;5426sw_mon_entries->end_of_ppdu = false;5427sw_mon_entries->drop_ppdu = false;5428p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;5429msdu_cnt = sw_mon_entries->msdu_cnt;54305431sw_mon_entries->end_of_ppdu =5432FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);5433if (sw_mon_entries->end_of_ppdu)5434return rx_bufs_used;54355436if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,5437sw_desc->info0) ==5438HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {5439rxdma_err =5440FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,5441sw_desc->info0);5442if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||5443rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||5444rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {5445pmon->rx_mon_stats.dest_mpdu_drop++;5446drop_mpdu = true;5447}5448}54495450is_frag = false;5451is_first_msdu = true;54525453do {5454rx_msdu_link_desc =5455(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +5456(sw_mon_entries->mon_dst_paddr -5457pmon->link_desc_banks[sw_cookie].paddr);54585459ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,5460&num_msdus);54615462for (i = 0; i < num_msdus; i++) {5463buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,5464msdu_list.sw_cookie[i]);54655466spin_lock_bh(&rx_ring->idr_lock);5467msdu = idr_find(&rx_ring->bufs_idr, buf_id);5468if (!msdu) {5469ath11k_dbg(ar->ab, ATH11K_DBG_DATA,5470"full mon msdu_pop: invalid buf_id %d\n",5471buf_id);5472spin_unlock_bh(&rx_ring->idr_lock);5473goto next_msdu;5474}5475idr_remove(&rx_ring->bufs_idr, buf_id);5476spin_unlock_bh(&rx_ring->idr_lock);54775478rxcb = ATH11K_SKB_RXCB(msdu);5479if (!rxcb->unmapped) {5480dma_unmap_single(ar->ab->dev, rxcb->paddr,5481msdu->len +5482skb_tailroom(msdu),5483DMA_FROM_DEVICE);5484rxcb->unmapped = 1;5485}5486if (drop_mpdu) {5487ath11k_dbg(ar->ab, ATH11K_DBG_DATA,5488"full mon: i %d drop msdu %p *ppdu_id %x\n",5489i, msdu, sw_mon_entries->ppdu_id);5490dev_kfree_skb_any(msdu);5491msdu_cnt--;5492goto next_msdu;5493}54945495rx_desc = (struct hal_rx_desc *)msdu->data;54965497rx_pkt_offset = sizeof(struct hal_rx_desc);5498l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);54995500if (is_first_msdu) {5501if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {5502drop_mpdu = true;5503dev_kfree_skb_any(msdu);5504msdu = NULL;5505goto next_msdu;5506}5507is_first_msdu = false;5508}55095510ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],5511&is_frag, &total_len,5512&frag_len, &msdu_cnt);55135514rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;55155516ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);55175518if (!(*head_msdu))5519*head_msdu = msdu;5520else if (last)5521last->next = msdu;55225523last = msdu;5524next_msdu:5525rx_bufs_used++;5526}55275528ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,5529&sw_mon_entries->mon_dst_paddr,5530&sw_mon_entries->mon_dst_sw_cookie,5531&rbm,5532&p_buf_addr_info);55335534if (ath11k_dp_rx_monitor_link_desc_return(ar,5535p_last_buf_addr_info,5536dp->mac_id))5537ath11k_dbg(ar->ab, ATH11K_DBG_DATA,5538"full mon: dp_rx_monitor_link_desc_return failed\n");55395540p_last_buf_addr_info = p_buf_addr_info;55415542} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);55435544if (last)5545last->next = NULL;55465547*tail_msdu = msdu;55485549return rx_bufs_used;5550}55515552static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,5553struct dp_full_mon_mpdu *mon_mpdu,5554struct sk_buff *head,5555struct sk_buff *tail)5556{5557mon_mpdu = kzalloc(sizeof(*mon_mpdu), 
GFP_ATOMIC);5558if (!mon_mpdu)5559return -ENOMEM;55605561list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);5562mon_mpdu->head = head;5563mon_mpdu->tail = tail;55645565return 0;5566}55675568static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,5569struct dp_full_mon_mpdu *mon_mpdu)5570{5571struct dp_full_mon_mpdu *tmp;5572struct sk_buff *tmp_msdu, *skb_next;55735574if (list_empty(&dp->dp_full_mon_mpdu_list))5575return;55765577list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {5578list_del(&mon_mpdu->list);55795580tmp_msdu = mon_mpdu->head;5581while (tmp_msdu) {5582skb_next = tmp_msdu->next;5583dev_kfree_skb_any(tmp_msdu);5584tmp_msdu = skb_next;5585}55865587kfree(mon_mpdu);5588}5589}55905591static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,5592int mac_id,5593struct ath11k_mon_data *pmon,5594struct napi_struct *napi)5595{5596struct ath11k_pdev_mon_stats *rx_mon_stats;5597struct dp_full_mon_mpdu *tmp;5598struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;5599struct sk_buff *head_msdu, *tail_msdu;5600struct ath11k_base *ab = ar->ab;5601struct ath11k_dp *dp = &ab->dp;5602int ret;56035604rx_mon_stats = &pmon->rx_mon_stats;56055606list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {5607list_del(&mon_mpdu->list);5608head_msdu = mon_mpdu->head;5609tail_msdu = mon_mpdu->tail;5610if (head_msdu && tail_msdu) {5611ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,5612&pmon->mon_ppdu_info,5613tail_msdu, napi);5614rx_mon_stats->dest_mpdu_done++;5615ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");5616}5617kfree(mon_mpdu);5618}56195620return ret;5621}56225623static int5624ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,5625struct napi_struct *napi, int budget)5626{5627struct ath11k *ar = ab->pdevs[mac_id].ar;5628struct ath11k_pdev_dp *dp = &ar->dp;5629struct ath11k_mon_data *pmon = &dp->mon_data;5630struct hal_sw_mon_ring_entries *sw_mon_entries;5631int quota = 0, work = 0, count;56325633sw_mon_entries = &pmon->sw_mon_entries;56345635while (pmon->hold_mon_dst_ring) {5636quota = ath11k_dp_rx_process_mon_status(ab, mac_id,5637napi, 1);5638if (pmon->buf_state == DP_MON_STATUS_MATCH) {5639count = sw_mon_entries->status_buf_count;5640if (count > 1) {5641quota += ath11k_dp_rx_process_mon_status(ab, mac_id,5642napi, count);5643}56445645ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,5646pmon, napi);5647pmon->hold_mon_dst_ring = false;5648} else if (!pmon->mon_status_paddr ||5649pmon->buf_state == DP_MON_STATUS_LEAD) {5650sw_mon_entries->drop_ppdu = true;5651pmon->hold_mon_dst_ring = false;5652}56535654if (!quota)5655break;56565657work += quota;5658}56595660if (sw_mon_entries->drop_ppdu)5661ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);56625663return work;5664}56655666static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,5667struct napi_struct *napi, int budget)5668{5669struct ath11k *ar = ab->pdevs[mac_id].ar;5670struct ath11k_pdev_dp *dp = &ar->dp;5671struct ath11k_mon_data *pmon = &dp->mon_data;5672struct hal_sw_mon_ring_entries *sw_mon_entries;5673struct ath11k_pdev_mon_stats *rx_mon_stats;5674struct sk_buff *head_msdu, *tail_msdu;5675struct hal_srng *mon_dst_srng;5676void *ring_entry;5677u32 rx_bufs_used = 0, mpdu_rx_bufs_used;5678int quota = 0, ret;5679bool break_dst_ring = false;56805681spin_lock_bh(&pmon->mon_lock);56825683sw_mon_entries = &pmon->sw_mon_entries;5684rx_mon_stats = &pmon->rx_mon_stats;56855686if (pmon->hold_mon_dst_ring) 
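	/* hold_mon_dst_ring is the handshake with the status ring: it is set
	 * further below once a complete PPDU boundary has been popped from the
	 * destination ring, and cleared again in
	 * ath11k_dp_rx_process_full_mon_status_ring() once the matching status
	 * buffers have been reaped and the PPDU delivered (or dropped). While
	 * it is set, destination-ring processing is skipped here.
	 */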
{5687spin_unlock_bh(&pmon->mon_lock);5688goto reap_status_ring;5689}56905691mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];5692spin_lock_bh(&mon_dst_srng->lock);56935694ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);5695while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {5696head_msdu = NULL;5697tail_msdu = NULL;56985699mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,5700&head_msdu,5701&tail_msdu,5702sw_mon_entries);5703rx_bufs_used += mpdu_rx_bufs_used;57045705if (!sw_mon_entries->end_of_ppdu) {5706if (head_msdu) {5707ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,5708pmon->mon_mpdu,5709head_msdu,5710tail_msdu);5711if (ret)5712break_dst_ring = true;5713}57145715goto next_entry;5716} else {5717if (!sw_mon_entries->ppdu_id &&5718!sw_mon_entries->mon_status_paddr) {5719break_dst_ring = true;5720goto next_entry;5721}5722}57235724rx_mon_stats->dest_ppdu_done++;5725pmon->mon_ppdu_status = DP_PPDU_STATUS_START;5726pmon->buf_state = DP_MON_STATUS_LAG;5727pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;5728pmon->hold_mon_dst_ring = true;5729next_entry:5730ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,5731mon_dst_srng);5732if (break_dst_ring)5733break;5734}57355736ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);5737spin_unlock_bh(&mon_dst_srng->lock);5738spin_unlock_bh(&pmon->mon_lock);57395740if (rx_bufs_used) {5741ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,5742&dp->rxdma_mon_buf_ring,5743rx_bufs_used,5744HAL_RX_BUF_RBM_SW3_BM);5745}57465747reap_status_ring:5748quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,5749napi, budget);57505751return quota;5752}57535754int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,5755struct napi_struct *napi, int budget)5756{5757struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);5758int ret = 0;57595760if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&5761ab->hw_params.full_monitor_mode)5762ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);5763else5764ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);57655766return ret;5767}57685769static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)5770{5771struct ath11k_pdev_dp *dp = &ar->dp;5772struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;57735774pmon->mon_ppdu_status = DP_PPDU_STATUS_START;57755776memset(&pmon->rx_mon_stats, 0,5777sizeof(pmon->rx_mon_stats));5778return 0;5779}57805781int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)5782{5783struct ath11k_pdev_dp *dp = &ar->dp;5784struct ath11k_mon_data *pmon = &dp->mon_data;5785struct hal_srng *mon_desc_srng = NULL;5786struct dp_srng *dp_srng;5787int ret = 0;5788u32 n_link_desc = 0;57895790ret = ath11k_dp_rx_pdev_mon_status_attach(ar);5791if (ret) {5792ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");5793return ret;5794}57955796/* if rxdma1_enable is false, no need to setup5797* rxdma_mon_desc_ring.5798*/5799if (!ar->ab->hw_params.rxdma1_enable)5800return 0;58015802dp_srng = &dp->rxdma_mon_desc_ring;5803n_link_desc = dp_srng->size /5804ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);5805mon_desc_srng =5806&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];58075808ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,5809HAL_RXDMA_MONITOR_DESC, mon_desc_srng,5810n_link_desc);5811if (ret) {5812ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");5813return ret;5814}5815pmon->mon_last_linkdesc_paddr = 0;5816pmon->mon_last_buf_cookie = 
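	/* presumably an out-of-range value on purpose, so the very first real
	 * sw cookie can never match the duplicate-buffer check in
	 * ath11k_dp_rx_mon_mpdu_pop()
	 */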
DP_RX_DESC_COOKIE_MAX + 1;5817spin_lock_init(&pmon->mon_lock);58185819return 0;5820}58215822static int ath11k_dp_mon_link_free(struct ath11k *ar)5823{5824struct ath11k_pdev_dp *dp = &ar->dp;5825struct ath11k_mon_data *pmon = &dp->mon_data;58265827ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,5828HAL_RXDMA_MONITOR_DESC,5829&dp->rxdma_mon_desc_ring);5830return 0;5831}58325833int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)5834{5835ath11k_dp_mon_link_free(ar);5836return 0;5837}58385839int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)5840{5841/* start reap timer */5842mod_timer(&ab->mon_reap_timer,5843jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));58445845return 0;5846}58475848int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)5849{5850int ret;58515852if (stop_timer)5853timer_delete_sync(&ab->mon_reap_timer);58545855/* reap all the monitor related rings */5856ret = ath11k_dp_purge_mon_ring(ab);5857if (ret) {5858ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);5859return ret;5860}58615862return 0;5863}586458655866
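/* Note on the pktlog start/stop pair above: they only arm and cancel
 * ab->mon_reap_timer (the periodic reaping presumably runs from that timer's
 * callback elsewhere in the driver), and ath11k_dp_rx_pktlog_stop() also
 * drains anything still queued on the monitor rings via
 * ath11k_dp_purge_mon_ring() so no stale status buffers are left behind.
 */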