Path: blob/main/sys/contrib/dev/athk/ath12k/dp_rx.c
48375 views
// SPDX-License-Identifier: BSD-3-Clause-Clear1/*2* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.3* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.4*/56#include <linux/ieee80211.h>7#include <linux/kernel.h>8#include <linux/skbuff.h>9#include <crypto/hash.h>10#include "core.h"11#include "debug.h"12#include "hal_desc.h"13#include "hw.h"14#include "dp_rx.h"15#include "hal_rx.h"16#include "dp_tx.h"17#include "peer.h"18#include "dp_mon.h"1920#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)2122static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,23struct hal_rx_desc *desc)24{25if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))26return HAL_ENCRYPT_TYPE_OPEN;2728return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);29}3031u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,32struct hal_rx_desc *desc)33{34return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);35}3637static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,38struct hal_rx_desc *desc)39{40return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);41}4243static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,44struct hal_rx_desc *desc)45{46return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);47}4849static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,50struct hal_rx_desc *desc)51{52return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);53}5455static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,56struct sk_buff *skb)57{58struct ieee80211_hdr *hdr;5960hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);61return ieee80211_has_morefrags(hdr->frame_control);62}6364static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,65struct sk_buff *skb)66{67struct ieee80211_hdr *hdr;6869hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);70return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;71}7273static u16 ath12k_dp_rx_h_seq_no(struct 
ath12k_base *ab,74struct hal_rx_desc *desc)75{76return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);77}7879static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,80struct hal_rx_desc *desc)81{82return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);83}8485static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,86struct hal_rx_desc *desc)87{88return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);89}9091static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,92struct hal_rx_desc *desc)93{94return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);95}9697static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,98struct hal_rx_desc *desc)99{100return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);101}102103u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,104struct hal_rx_desc *desc)105{106return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);107}108109static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,110struct hal_rx_desc *desc)111{112return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);113}114115static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,116struct hal_rx_desc *desc)117{118return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);119}120121static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,122struct hal_rx_desc *desc)123{124return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);125}126127static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,128struct hal_rx_desc *desc)129{130return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);131}132133static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,134struct hal_rx_desc *desc)135{136return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);137}138139static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,140struct hal_rx_desc *desc)141{142return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);143}144145static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,146struct hal_rx_desc *desc)147{148return 
hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));149}150151static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,152struct hal_rx_desc *desc)153{154return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);155}156157static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,158struct hal_rx_desc *desc)159{160return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);161}162163u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,164struct hal_rx_desc *desc)165{166return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);167}168169static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,170struct hal_rx_desc *desc)171{172return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);173}174175static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,176struct hal_rx_desc *desc)177{178return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);179}180181static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,182struct hal_rx_desc *fdesc,183struct hal_rx_desc *ldesc)184{185ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);186}187188static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,189struct hal_rx_desc *desc,190u16 len)191{192ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);193}194195static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,196struct hal_rx_desc *desc)197{198return (ath12k_dp_rx_h_first_msdu(ab, desc) &&199ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));200}201202static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,203struct hal_rx_desc *desc)204{205return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);206}207208static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,209struct hal_rx_desc *desc)210{211return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);212}213214static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,215struct hal_rx_desc *desc,216struct ieee80211_hdr *hdr)217{218ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, 
hdr);219}220221static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,222struct hal_rx_desc *desc,223u8 *crypto_hdr,224enum hal_encrypt_type enctype)225{226ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);227}228229static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,230struct hal_rx_desc *desc)231{232return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);233}234235static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)236{237int i, reaped = 0;238unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);239240do {241for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)242reaped += ath12k_dp_mon_process_ring(ab, i, NULL,243DP_MON_SERVICE_BUDGET,244ATH12K_DP_RX_MONITOR_MODE);245246/* nothing more to reap */247if (reaped < DP_MON_SERVICE_BUDGET)248return 0;249250} while (time_before(jiffies, timeout));251252ath12k_warn(ab, "dp mon ring purge timeout");253254return -ETIMEDOUT;255}256257/* Returns number of Rx buffers replenished */258int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,259struct dp_rxdma_ring *rx_ring,260int req_entries,261enum hal_rx_buf_return_buf_manager mgr,262bool hw_cc)263{264struct ath12k_buffer_addr *desc;265struct hal_srng *srng;266struct sk_buff *skb;267int num_free;268int num_remain;269int buf_id;270u32 cookie;271dma_addr_t paddr;272struct ath12k_dp *dp = &ab->dp;273struct ath12k_rx_desc_info *rx_desc;274275req_entries = min(req_entries, rx_ring->bufs_max);276277srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];278279spin_lock_bh(&srng->lock);280281ath12k_hal_srng_access_begin(ab, srng);282283num_free = ath12k_hal_srng_src_num_free(ab, srng, true);284if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))285req_entries = num_free;286287req_entries = min(num_free, req_entries);288num_remain = req_entries;289290while (num_remain > 0) {291skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +292DP_RX_BUFFER_ALIGN_SIZE);293if 
(!skb)294break;295296if (!IS_ALIGNED((unsigned long)skb->data,297DP_RX_BUFFER_ALIGN_SIZE)) {298skb_pull(skb,299PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -300skb->data);301}302303paddr = dma_map_single(ab->dev, skb->data,304skb->len + skb_tailroom(skb),305DMA_FROM_DEVICE);306if (dma_mapping_error(ab->dev, paddr))307goto fail_free_skb;308309if (hw_cc) {310spin_lock_bh(&dp->rx_desc_lock);311312/* Get desc from free list and store in used list313* for cleanup purposes314*315* TODO: pass the removed descs rather than316* add/read to optimize317*/318rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,319struct ath12k_rx_desc_info,320list);321if (!rx_desc) {322spin_unlock_bh(&dp->rx_desc_lock);323goto fail_dma_unmap;324}325326rx_desc->skb = skb;327cookie = rx_desc->cookie;328list_del(&rx_desc->list);329list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);330331spin_unlock_bh(&dp->rx_desc_lock);332} else {333spin_lock_bh(&rx_ring->idr_lock);334buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,335rx_ring->bufs_max * 3, GFP_ATOMIC);336spin_unlock_bh(&rx_ring->idr_lock);337if (buf_id < 0)338goto fail_dma_unmap;339cookie = u32_encode_bits(mac_id,340DP_RXDMA_BUF_COOKIE_PDEV_ID) |341u32_encode_bits(buf_id,342DP_RXDMA_BUF_COOKIE_BUF_ID);343}344345desc = ath12k_hal_srng_src_get_next_entry(ab, srng);346if (!desc)347goto fail_buf_unassign;348349ATH12K_SKB_RXCB(skb)->paddr = paddr;350351num_remain--;352353ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);354}355356ath12k_hal_srng_access_end(ab, srng);357358spin_unlock_bh(&srng->lock);359360return req_entries - num_remain;361362fail_buf_unassign:363if (hw_cc) {364spin_lock_bh(&dp->rx_desc_lock);365list_del(&rx_desc->list);366list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);367rx_desc->skb = NULL;368spin_unlock_bh(&dp->rx_desc_lock);369} else {370spin_lock_bh(&rx_ring->idr_lock);371idr_remove(&rx_ring->bufs_idr, buf_id);372spin_unlock_bh(&rx_ring->idr_lock);373}374fail_dma_unmap:375dma_unmap_single(ab->dev, paddr, 
skb->len + skb_tailroom(skb),376DMA_FROM_DEVICE);377fail_free_skb:378dev_kfree_skb_any(skb);379380ath12k_hal_srng_access_end(ab, srng);381382spin_unlock_bh(&srng->lock);383384return req_entries - num_remain;385}386387static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,388struct dp_rxdma_ring *rx_ring)389{390struct sk_buff *skb;391int buf_id;392393spin_lock_bh(&rx_ring->idr_lock);394idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {395idr_remove(&rx_ring->bufs_idr, buf_id);396/* TODO: Understand where internal driver does this dma_unmap397* of rxdma_buffer.398*/399dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,400skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);401dev_kfree_skb_any(skb);402}403404idr_destroy(&rx_ring->bufs_idr);405spin_unlock_bh(&rx_ring->idr_lock);406407return 0;408}409410static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)411{412struct ath12k_dp *dp = &ab->dp;413struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;414415ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);416417rx_ring = &dp->rxdma_mon_buf_ring;418ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);419420rx_ring = &dp->tx_mon_buf_ring;421ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);422423return 0;424}425426static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,427struct dp_rxdma_ring *rx_ring,428u32 ringtype)429{430int num_entries;431432num_entries = rx_ring->refill_buf_ring.size /433ath12k_hal_srng_get_entrysize(ab, ringtype);434435rx_ring->bufs_max = num_entries;436if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))437ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);438else439ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,440ab->hw_params->hal_params->rx_buf_rbm,441ringtype == HAL_RXDMA_BUF);442return 0;443}444445static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)446{447struct ath12k_dp *dp = &ab->dp;448struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;449int ret;450451ret = 
ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,452HAL_RXDMA_BUF);453if (ret) {454ath12k_warn(ab,455"failed to setup HAL_RXDMA_BUF\n");456return ret;457}458459if (ab->hw_params->rxdma1_enable) {460rx_ring = &dp->rxdma_mon_buf_ring;461ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,462HAL_RXDMA_MONITOR_BUF);463if (ret) {464ath12k_warn(ab,465"failed to setup HAL_RXDMA_MONITOR_BUF\n");466return ret;467}468469rx_ring = &dp->tx_mon_buf_ring;470ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,471HAL_TX_MONITOR_BUF);472if (ret) {473ath12k_warn(ab,474"failed to setup HAL_TX_MONITOR_BUF\n");475return ret;476}477}478479return 0;480}481482static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)483{484struct ath12k_pdev_dp *dp = &ar->dp;485struct ath12k_base *ab = ar->ab;486int i;487488for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {489ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);490ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);491}492}493494void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)495{496struct ath12k_dp *dp = &ab->dp;497int i;498499for (i = 0; i < DP_REO_DST_RING_MAX; i++)500ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);501}502503int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)504{505struct ath12k_dp *dp = &ab->dp;506int ret;507int i;508509for (i = 0; i < DP_REO_DST_RING_MAX; i++) {510ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],511HAL_REO_DST, i, 0,512DP_REO_DST_RING_SIZE);513if (ret) {514ath12k_warn(ab, "failed to setup reo_dst_ring\n");515goto err_reo_cleanup;516}517}518519return 0;520521err_reo_cleanup:522ath12k_dp_rx_pdev_reo_cleanup(ab);523524return ret;525}526527static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)528{529struct ath12k_pdev_dp *dp = &ar->dp;530struct ath12k_base *ab = ar->ab;531int i;532int ret;533u32 mac_id = dp->mac_id;534535for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {536ret = ath12k_dp_srng_setup(ar->ab,537&dp->rxdma_mon_dst_ring[i],538HAL_RXDMA_MONITOR_DST,5390, mac_id + 
i,540DP_RXDMA_MONITOR_DST_RING_SIZE);541if (ret) {542ath12k_warn(ar->ab,543"failed to setup HAL_RXDMA_MONITOR_DST\n");544return ret;545}546547ret = ath12k_dp_srng_setup(ar->ab,548&dp->tx_mon_dst_ring[i],549HAL_TX_MONITOR_DST,5500, mac_id + i,551DP_TX_MONITOR_DEST_RING_SIZE);552if (ret) {553ath12k_warn(ar->ab,554"failed to setup HAL_TX_MONITOR_DST\n");555return ret;556}557}558559return 0;560}561562void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)563{564struct ath12k_dp *dp = &ab->dp;565struct ath12k_dp_rx_reo_cmd *cmd, *tmp;566struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;567568spin_lock_bh(&dp->reo_cmd_lock);569list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {570list_del(&cmd->list);571dma_unmap_single(ab->dev, cmd->data.paddr,572cmd->data.size, DMA_BIDIRECTIONAL);573kfree(cmd->data.vaddr);574kfree(cmd);575}576577list_for_each_entry_safe(cmd_cache, tmp_cache,578&dp->reo_cmd_cache_flush_list, list) {579list_del(&cmd_cache->list);580dp->reo_cmd_cache_flush_count--;581dma_unmap_single(ab->dev, cmd_cache->data.paddr,582cmd_cache->data.size, DMA_BIDIRECTIONAL);583kfree(cmd_cache->data.vaddr);584kfree(cmd_cache);585}586spin_unlock_bh(&dp->reo_cmd_lock);587}588589static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,590enum hal_reo_cmd_status status)591{592struct ath12k_dp_rx_tid *rx_tid = ctx;593594if (status != HAL_REO_CMD_SUCCESS)595ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",596rx_tid->tid, status);597598dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,599DMA_BIDIRECTIONAL);600kfree(rx_tid->vaddr);601rx_tid->vaddr = NULL;602}603604static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,605enum hal_reo_cmd_type type,606struct ath12k_hal_reo_cmd *cmd,607void (*cb)(struct ath12k_dp *dp, void *ctx,608enum hal_reo_cmd_status status))609{610struct ath12k_dp *dp = &ab->dp;611struct ath12k_dp_rx_reo_cmd *dp_cmd;612struct hal_srng *cmd_ring;613int 
cmd_num;614615cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];616cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);617618/* cmd_num should start from 1, during failure return the error code */619if (cmd_num < 0)620return cmd_num;621622/* reo cmd ring descriptors has cmd_num starting from 1 */623if (cmd_num == 0)624return -EINVAL;625626if (!cb)627return 0;628629/* Can this be optimized so that we keep the pending command list only630* for tid delete command to free up the resource on the command status631* indication?632*/633dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);634635if (!dp_cmd)636return -ENOMEM;637638memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));639dp_cmd->cmd_num = cmd_num;640dp_cmd->handler = cb;641642spin_lock_bh(&dp->reo_cmd_lock);643list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);644spin_unlock_bh(&dp->reo_cmd_lock);645646return 0;647}648649static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,650struct ath12k_dp_rx_tid *rx_tid)651{652struct ath12k_hal_reo_cmd cmd = {0};653unsigned long tot_desc_sz, desc_sz;654int ret;655656tot_desc_sz = rx_tid->size;657desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);658659while (tot_desc_sz > desc_sz) {660tot_desc_sz -= desc_sz;661cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);662cmd.addr_hi = upper_32_bits(rx_tid->paddr);663ret = ath12k_dp_reo_cmd_send(ab, rx_tid,664HAL_REO_CMD_FLUSH_CACHE, &cmd,665NULL);666if (ret)667ath12k_warn(ab,668"failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",669rx_tid->tid, ret);670}671672memset(&cmd, 0, sizeof(cmd));673cmd.addr_lo = lower_32_bits(rx_tid->paddr);674cmd.addr_hi = upper_32_bits(rx_tid->paddr);675cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;676ret = ath12k_dp_reo_cmd_send(ab, rx_tid,677HAL_REO_CMD_FLUSH_CACHE,678&cmd, ath12k_dp_reo_cmd_free);679if (ret) {680ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",681rx_tid->tid, ret);682dma_unmap_single(ab->dev, rx_tid->paddr, 
rx_tid->size,683DMA_BIDIRECTIONAL);684kfree(rx_tid->vaddr);685rx_tid->vaddr = NULL;686}687}688689static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,690enum hal_reo_cmd_status status)691{692struct ath12k_base *ab = dp->ab;693struct ath12k_dp_rx_tid *rx_tid = ctx;694struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;695696if (status == HAL_REO_CMD_DRAIN) {697goto free_desc;698} else if (status != HAL_REO_CMD_SUCCESS) {699/* Shouldn't happen! Cleanup in case of other failure? */700ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",701rx_tid->tid, status);702return;703}704705elem = kzalloc(sizeof(*elem), GFP_ATOMIC);706if (!elem)707goto free_desc;708709elem->ts = jiffies;710memcpy(&elem->data, rx_tid, sizeof(*rx_tid));711712spin_lock_bh(&dp->reo_cmd_lock);713list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);714dp->reo_cmd_cache_flush_count++;715716/* Flush and invalidate aged REO desc from HW cache */717list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,718list) {719if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||720time_after(jiffies, elem->ts +721msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {722list_del(&elem->list);723dp->reo_cmd_cache_flush_count--;724725/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()726* within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list727* is used in only two contexts, one is in this function called728* from napi and the other in ath12k_dp_free during core destroy.729* Before dp_free, the irqs would be disabled and would wait to730* synchronize. Hence there wouldn’t be any race against add or731* delete to this list. 
Hence unlock-lock is safe here.732*/733spin_unlock_bh(&dp->reo_cmd_lock);734735ath12k_dp_reo_cache_flush(ab, &elem->data);736kfree(elem);737spin_lock_bh(&dp->reo_cmd_lock);738}739}740spin_unlock_bh(&dp->reo_cmd_lock);741742return;743free_desc:744dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,745DMA_BIDIRECTIONAL);746kfree(rx_tid->vaddr);747rx_tid->vaddr = NULL;748}749750static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,751dma_addr_t paddr)752{753struct ath12k_reo_queue_ref *qref;754struct ath12k_dp *dp = &ab->dp;755756if (!ab->hw_params->reoq_lut_support)757return;758759/* TODO: based on ML peer or not, select the LUT. below assumes non760* ML peer761*/762qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +763(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);764765qref->info0 = u32_encode_bits(lower_32_bits(paddr),766BUFFER_ADDR_INFO0_ADDR);767qref->info1 = u32_encode_bits(upper_32_bits(paddr),768BUFFER_ADDR_INFO1_ADDR) |769u32_encode_bits(tid, DP_REO_QREF_NUM);770}771772static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)773{774struct ath12k_reo_queue_ref *qref;775struct ath12k_dp *dp = &ab->dp;776777if (!ab->hw_params->reoq_lut_support)778return;779780/* TODO: based on ML peer or not, select the LUT. 
below assumes non781* ML peer782*/783qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +784(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);785786qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);787qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |788u32_encode_bits(tid, DP_REO_QREF_NUM);789}790791void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,792struct ath12k_peer *peer, u8 tid)793{794struct ath12k_hal_reo_cmd cmd = {0};795struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];796int ret;797798if (!rx_tid->active)799return;800801cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;802cmd.addr_lo = lower_32_bits(rx_tid->paddr);803cmd.addr_hi = upper_32_bits(rx_tid->paddr);804cmd.upd0 = HAL_REO_CMD_UPD0_VLD;805ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,806HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,807ath12k_dp_rx_tid_del_func);808if (ret) {809ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",810tid, ret);811dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,812DMA_BIDIRECTIONAL);813kfree(rx_tid->vaddr);814rx_tid->vaddr = NULL;815}816817ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);818819rx_tid->active = false;820}821822/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted823* to struct hal_wbm_release_ring, I couldn't figure out the logic behind824* that.825*/826static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,827struct hal_reo_dest_ring *ring,828enum hal_wbm_rel_bm_act action)829{830struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;831struct hal_wbm_release_ring *desc;832struct ath12k_dp *dp = &ab->dp;833struct hal_srng *srng;834int ret = 0;835836srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];837838spin_lock_bh(&srng->lock);839840ath12k_hal_srng_access_begin(ab, srng);841842desc = ath12k_hal_srng_src_get_next_entry(ab, srng);843if (!desc) {844ret = -ENOBUFS;845goto exit;846}847848ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, 
action);849850exit:851ath12k_hal_srng_access_end(ab, srng);852853spin_unlock_bh(&srng->lock);854855return ret;856}857858static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,859bool rel_link_desc)860{861struct ath12k_base *ab = rx_tid->ab;862863lockdep_assert_held(&ab->base_lock);864865if (rx_tid->dst_ring_desc) {866if (rel_link_desc)867ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,868HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);869kfree(rx_tid->dst_ring_desc);870rx_tid->dst_ring_desc = NULL;871}872873rx_tid->cur_sn = 0;874rx_tid->last_frag_no = 0;875rx_tid->rx_frag_bitmap = 0;876__skb_queue_purge(&rx_tid->rx_frags);877}878879void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)880{881struct ath12k_dp_rx_tid *rx_tid;882int i;883884lockdep_assert_held(&ar->ab->base_lock);885886for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {887rx_tid = &peer->rx_tid[i];888889ath12k_dp_rx_peer_tid_delete(ar, peer, i);890ath12k_dp_rx_frags_cleanup(rx_tid, true);891892spin_unlock_bh(&ar->ab->base_lock);893del_timer_sync(&rx_tid->frag_timer);894spin_lock_bh(&ar->ab->base_lock);895}896}897898static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,899struct ath12k_peer *peer,900struct ath12k_dp_rx_tid *rx_tid,901u32 ba_win_sz, u16 ssn,902bool update_ssn)903{904struct ath12k_hal_reo_cmd cmd = {0};905int ret;906907cmd.addr_lo = lower_32_bits(rx_tid->paddr);908cmd.addr_hi = upper_32_bits(rx_tid->paddr);909cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;910cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;911cmd.ba_window_size = ba_win_sz;912913if (update_ssn) {914cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;915cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);916}917918ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,919HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,920NULL);921if (ret) {922ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",923rx_tid->tid, ret);924return ret;925}926927rx_tid->ba_win_sz = ba_win_sz;928929return 0;930}931932int ath12k_dp_rx_peer_tid_setup(struct 
ath12k *ar, const u8 *peer_mac, int vdev_id,933u8 tid, u32 ba_win_sz, u16 ssn,934enum hal_pn_type pn_type)935{936struct ath12k_base *ab = ar->ab;937struct ath12k_dp *dp = &ab->dp;938struct hal_rx_reo_queue *addr_aligned;939struct ath12k_peer *peer;940struct ath12k_dp_rx_tid *rx_tid;941u32 hw_desc_sz;942void *vaddr;943dma_addr_t paddr;944int ret;945946spin_lock_bh(&ab->base_lock);947948peer = ath12k_peer_find(ab, vdev_id, peer_mac);949if (!peer) {950spin_unlock_bh(&ab->base_lock);951ath12k_warn(ab, "failed to find the peer to set up rx tid\n");952return -ENOENT;953}954955if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {956spin_unlock_bh(&ab->base_lock);957ath12k_warn(ab, "reo qref table is not setup\n");958return -EINVAL;959}960961if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {962ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",963peer->peer_id, tid);964spin_unlock_bh(&ab->base_lock);965return -EINVAL;966}967968rx_tid = &peer->rx_tid[tid];969/* Update the tid queue if it is already setup */970if (rx_tid->active) {971paddr = rx_tid->paddr;972ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,973ba_win_sz, ssn, true);974spin_unlock_bh(&ab->base_lock);975if (ret) {976ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);977return ret;978}979980if (!ab->hw_params->reoq_lut_support) {981ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,982peer_mac,983paddr, tid, 1,984ba_win_sz);985if (ret) {986ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n",987tid, ret);988return ret;989}990}991992return 0;993}994995rx_tid->tid = tid;996997rx_tid->ba_win_sz = ba_win_sz;998999/* TODO: Optimize the memory allocation for qos tid based on1000* the actual BA window size in REO tid update path.1001*/1002if (tid == HAL_DESC_REO_NON_QOS_TID)1003hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);1004else1005hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);10061007vaddr = 
kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);1008if (!vaddr) {1009spin_unlock_bh(&ab->base_lock);1010return -ENOMEM;1011}10121013addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);10141015ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,1016ssn, pn_type);10171018paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,1019DMA_BIDIRECTIONAL);10201021ret = dma_mapping_error(ab->dev, paddr);1022if (ret) {1023spin_unlock_bh(&ab->base_lock);1024goto err_mem_free;1025}10261027rx_tid->vaddr = vaddr;1028rx_tid->paddr = paddr;1029rx_tid->size = hw_desc_sz;1030rx_tid->active = true;10311032if (ab->hw_params->reoq_lut_support) {1033/* Update the REO queue LUT at the corresponding peer id1034* and tid with qaddr.1035*/1036ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);1037spin_unlock_bh(&ab->base_lock);1038} else {1039spin_unlock_bh(&ab->base_lock);1040ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,1041paddr, tid, 1, ba_win_sz);1042}10431044return ret;10451046err_mem_free:1047kfree(vaddr);10481049return ret;1050}10511052int ath12k_dp_rx_ampdu_start(struct ath12k *ar,1053struct ieee80211_ampdu_params *params)1054{1055struct ath12k_base *ab = ar->ab;1056struct ath12k_sta *arsta = (void *)params->sta->drv_priv;1057int vdev_id = arsta->arvif->vdev_id;1058int ret;10591060ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,1061params->tid, params->buf_size,1062params->ssn, arsta->pn_type);1063if (ret)1064ath12k_warn(ab, "failed to setup rx tid %d\n", ret);10651066return ret;1067}10681069int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,1070struct ieee80211_ampdu_params *params)1071{1072struct ath12k_base *ab = ar->ab;1073struct ath12k_peer *peer;1074struct ath12k_sta *arsta = (void *)params->sta->drv_priv;1075int vdev_id = arsta->arvif->vdev_id;1076bool active;1077int ret;10781079spin_lock_bh(&ab->base_lock);10801081peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);1082if (!peer) 
{1083spin_unlock_bh(&ab->base_lock);1084ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");1085return -ENOENT;1086}10871088active = peer->rx_tid[params->tid].active;10891090if (!active) {1091spin_unlock_bh(&ab->base_lock);1092return 0;1093}10941095ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);1096spin_unlock_bh(&ab->base_lock);1097if (ret) {1098ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",1099params->tid, ret);1100return ret;1101}11021103return ret;1104}11051106int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,1107const u8 *peer_addr,1108enum set_key_cmd key_cmd,1109struct ieee80211_key_conf *key)1110{1111struct ath12k *ar = arvif->ar;1112struct ath12k_base *ab = ar->ab;1113struct ath12k_hal_reo_cmd cmd = {0};1114struct ath12k_peer *peer;1115struct ath12k_dp_rx_tid *rx_tid;1116u8 tid;1117int ret = 0;11181119/* NOTE: Enable PN/TSC replay check offload only for unicast frames.1120* We use mac80211 PN/TSC replay check functionality for bcast/mcast1121* for now.1122*/1123if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))1124return 0;11251126cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;1127cmd.upd0 = HAL_REO_CMD_UPD0_PN |1128HAL_REO_CMD_UPD0_PN_SIZE |1129HAL_REO_CMD_UPD0_PN_VALID |1130HAL_REO_CMD_UPD0_PN_CHECK |1131HAL_REO_CMD_UPD0_SVLD;11321133switch (key->cipher) {1134case WLAN_CIPHER_SUITE_TKIP:1135case WLAN_CIPHER_SUITE_CCMP:1136case WLAN_CIPHER_SUITE_CCMP_256:1137case WLAN_CIPHER_SUITE_GCMP:1138case WLAN_CIPHER_SUITE_GCMP_256:1139if (key_cmd == SET_KEY) {1140cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;1141cmd.pn_size = 48;1142}1143break;1144default:1145break;1146}11471148spin_lock_bh(&ab->base_lock);11491150peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);1151if (!peer) {1152spin_unlock_bh(&ab->base_lock);1153ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",1154peer_addr);1155return -ENOENT;1156}11571158for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {1159rx_tid = 
&peer->rx_tid[tid];
		/* Only TIDs with an initialized REO queue need the
		 * UPDATE_RX_QUEUE command for PN replay detection.
		 */
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

/* Find the slot for @peer_id in the PPDU user-stats array: returns the
 * index already holding this peer, or the first unused slot (slots are
 * filled in order, so the first invalid entry is free). -EINVAL when the
 * array is full. Note only MAX_USERS - 1 slots are scanned.
 */
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

/* TLV iterator callback: copy one PPDU-stats TLV from the firmware
 * message into @data (a struct htt_ppdu_stats_info). Each per-user tag
 * is routed to the slot selected by ath12k_get_ppdu_user_index() and
 * recorded in that user's tlv_flags bitmap. Unknown tags are ignored.
 * Returns 0, or -EINVAL on a short TLV or a full user-stats array.
 */
static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

/* Walk a buffer of HTT TLVs ([header][payload] records) and invoke
 * @iter on each payload. Truncated headers or payloads abort with
 * -EINVAL. Note that only an -ENOMEM from @iter stops the walk early;
 * other iterator errors are deliberately ignored and the walk
 * continues. Returns 0 once the buffer is consumed.
 * (FreeBSD uses const u8 * because pointer arithmetic on void * is a
 * GNU extension.)
 */
#if defined(__linux__)
static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
#elif defined(__FreeBSD__)
static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const u8 *ptr, size_t len,
#endif
				  int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
					      const void *ptr, void *data),
				  void *data)
{
	const struct htt_tlv *tlv;
#if defined(__linux__)
	const void *begin = ptr;
#elif defined(__FreeBSD__)
	const u8 *begin = ptr;
#endif
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
#if defined(__linux__)
		tlv = (struct htt_tlv *)ptr;
#elif defined(__FreeBSD__)
		tlv = (const struct htt_tlv *)ptr;
#endif
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

/* Decode the parsed PPDU stats for one user slot and fold them into
 * the station's (arsta) tx-rate info and ar->peer_tx_stats. Bails out
 * early if the mandatory USR_RATE TLV is absent or the reported rate
 * is out of range. Takes base_lock to look up the peer and holds it
 * while updating arsta/peer_stats.
 */
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	/* USR_RATE is the mandatory TLV; nothing useful without it */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	/* NOTE(review): the -2 maps the firmware BW encoding onto the
	 * value ath12k_mac_bw_to_mac80211_bw() expects — assumed, based
	 * on usage below; confirm against the HTT rate-flags definition.
	 */
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath12k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT rate index packs NSS and MCS: 8 MCS values per stream */
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		/* RU size is reported as a tone range [ru_start, ru_end] */
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

/* Flush per-user tx stats for every possible user slot of one PPDU. */
static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

/* Return the cached descriptor for @ppdu_id, or allocate a fresh one
 * (GFP_ATOMIC, caller holds ar->data_lock). When the cache exceeds
 * HTT_PPDU_DESC_MAX_DEPTH, the oldest entry is flushed to the per-peer
 * stats and freed before the new one is added. Returns NULL on OOM.
 */
static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

/* Snapshot a user's rate TLV into the peer for later delayed-BA use
 * (consumed by ath12k_copy_to_bar()); sets peer->delayba_flag.
 */
static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

/* Inverse of ath12k_copy_to_delay_stats(): restore the saved rate TLV
 * into a BAR PPDU's user stats and clear peer->delayba_flag.
 */
static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}

/* Handle an HTT PPDU_STATS_IND message: locate (or allocate) the cached
 * descriptor for this ppdu_id, parse the TLV payload into it, and apply
 * the delayed-BA save/restore logic for DATA and BAR frame types.
 * Locking: rcu for the ar lookup, ar->data_lock around the descriptor
 * cache, ab->base_lock around each peer lookup.
 *
 * NOTE(review): 'len' comes straight from the message header and is fed
 * to the TLV iterator without being checked against skb->len — looks
 * like a malformed/hostile message could read past the buffer; confirm
 * whether a bounds check is needed here.
 */
static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k *ar;
	int ret, i;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();

	return ret;
}

/* Cache the MLO timestamp-offset fields from an HTT event into the
 * pdev's timestamp block, under ar->data_lock.
 */
static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);

	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
}

/* Top-level dispatcher for target-to-host HTT messages received over
 * HTC. Decodes the message type from the first word and fans out to
 * the per-type handlers; always consumes the skb.
 */
void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		/* v1 map carries no ast_hash/hw_peer_id — pass zeros */
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		/* NOTE(review): ast_hash is still 0 here and peer_id is
		 * reused as hw_peer_id — verify this matches the MAP3
		 * event layout.
		 */
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}

/* Merge an MSDU that spans multiple rx buffers into @first, pulling the
 * continuation buffers off @msdu_list and freeing them. On success the
 * HAL descriptor and l3 padding have been stripped from @first. Returns
 * 0 or -ENOMEM/-EINVAL (all queued buffers of the MSDU are freed on the
 * -ENOMEM path).
 */
static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath12k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	/* Caller should only invoke this for multi-buffer MSDUs; a
	 * single-buffer MSDU here is unexpected but handled anyway.
	 */
	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers MSDU_END
	 * tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH12K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}

/* Return the buffer that holds the MSDU's last fragment (whose HAL
 * descriptor carries the valid MSDU_END tlvs): @first itself when it is
 * not a continuation, otherwise the first non-continuation entry in
 * @msdu_list, or NULL if none is queued yet.
 */
static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
						      struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}

/* Propagate the HW's IP/L4 checksum verdict from the rx descriptor into
 * skb->ip_summed so the network stack can skip re-checksumming.
 */
static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ath12k_base *ab = ar->ab;
	bool ip_csum_fail, l4_csum_fail;

	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);

	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}

/* MIC trailer length (bytes) for @enctype; 0 for types with no MIC
 * handled here. Unsupported types warn and return 0.
 */
static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

/* Per-frame crypto header (IV/PN) length for @enctype; 0 for open.
 * Unsupported types warn and return 0.
 */
static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

/* ICV trailer length for @enctype (non-zero only for TKIP here).
 * Unsupported types warn and return 0.
 */
static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

/* Convert a native-wifi decapped MSDU back to a full 802.11 QoS data
 * frame for mac80211: re-inserts the QoS control field (TID + mesh ctl
 * bit) and, when the IV was not stripped by HW, rebuilds the crypto
 * header from the rx descriptor.
 */
static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 *crypto_hdr;
	u16 qos_ctl;

	/* pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	skb_pull(msdu, hdr_len);

	/* Rebuild qos header */
	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

	/* Reset the order bit as the HT_Control header is stripped */
	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

	qos_ctl = rxcb->tid;

	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

	/* TODO: Add other QoS ctl fields when required */

	/* copy decap header before overwriting for reuse below */
	memcpy(decap_hdr, hdr, hdr_len);

	/* Rebuild crypto header for mac80211 use */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
						    rxcb->rx_desc, crypto_hdr,
						    enctype);
	}

	memcpy(skb_push(msdu,
			IEEE80211_QOS_CTL_LEN), &qos_ctl,
	       IEEE80211_QOS_CTL_LEN);
	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
}

/* For raw (un-decapped) MSDUs: drop the FCS and, for decrypted frames,
 * trim the MIC/ICV/MMIC trailers and strip the crypto header according
 * to which RX_FLAG_*_STRIPPED bits the HW reported. Only single-buffer
 * (first == last) MSDUs are expected here.
 */
static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath12k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	/* NOTE(review): trims the Michael MIC using IEEE80211_CCMP_MIC_LEN;
	 * both happen to be 8 bytes, but a TKIP-specific constant would be
	 * clearer — confirm intent.
	 */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);

		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Reconstruct the original 802.11 header (and optional crypto header)
 * in front of @msdu using fields saved in the rx descriptor; appends
 * the QoS control field for QoS data frames. Used when converting an
 * ethernet-decapped frame back to 802.11.
 */
static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
					      struct sk_buff *msdu,
					      struct ath12k_skb_rxcb *rxcb,
					      struct ieee80211_rx_status *status,
					      enum hal_encrypt_type enctype)
{
	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
	struct ath12k_base *ab = ar->ab;
	size_t hdr_len, crypto_len;
	struct ieee80211_hdr *hdr;
	u16 qos_ctl;
	__le16 fc;
	u8 *crypto_hdr;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
		crypto_hdr = skb_push(msdu, crypto_len);
		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
	}

	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
	hdr_len = ieee80211_hdrlen(fc);
	skb_push(msdu, hdr_len);
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr->frame_control = fc;

	/* Get wifi header from rx_desc */
	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);

	if (rxcb->is_mcbc)
		status->flag &= ~RX_FLAG_PN_VALIDATED;

	/* Add QOS header */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos_ctl = rxcb->tid;
		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO: Add other QoS ctl fields when required */
		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
	}
}

/* Convert an ethernet-decapped MSDU back to 802.11: replace the eth
 * header with an RFC 1042 SNAP header carrying the original ethertype,
 * then rebuild the 802.11 header and restore the original DA/SA.
 */
static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
				       struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};

	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	rfc.snap_type = eth->h_proto;
	skb_pull(msdu, sizeof(*eth));
	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
	       sizeof(rfc));
	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

/* Dispatch to the proper undecap routine based on the HW decap mode.
 * Ethernet-decapped frames are only converted back to 802.11 for EAPOL
 * (mac80211 fast-rx requires an authorized STA) and for decrypted
 * multicast (PN validated by mac80211); 802.3 decap is not handled yet.
 */
static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
				   struct hal_rx_desc *rx_desc,
				   enum hal_encrypt_type enctype,
				   struct ieee80211_rx_status *status,
				   bool decrypted)
{
	struct ath12k_base *ab = ar->ab;
	u8 decap;
	struct ethhdr *ehdr;

	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);

	switch (decap) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}

/* Resolve the peer for an rx MSDU: first by the peer_id stashed in the
 * skb control block, falling back to the MPDU's addr2 from the rx
 * descriptor when valid. Caller must hold ab->base_lock; may return
 * NULL.
 */
struct ath12k_peer *
ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
	struct ath12k_peer *peer = NULL;

	lockdep_assert_held(&ab->base_lock);

	if (rxcb->peer_id)
		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);

	if (peer)
		return peer;

	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
		return NULL;

	peer = ath12k_peer_find_by_addr(ab,
					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
									      rx_desc));
	return peer;
}

/* Per-MPDU rx processing: determine the encryption type from the peer's
 * security state, translate HW error/decryption status into mac80211
 * RX_FLAG_* bits, apply checksum offload and undecap, and clear the
 * protected bit from the 802.11 header when the IV was stripped.
 */
static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool fill_crypto_hdr;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath12k_peer *peer;
	u32 err_bitmap;

	/* PN for multicast packets will be checked in mac80211 */
	rxcb = ATH12K_SKB_RXCB(msdu);
	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
	rxcb->is_mcbc = fill_crypto_hdr;

	if (rxcb->is_mcbc)
		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
	if (peer) {
		/* group vs pairwise key depending on mcast/ucast */
		if (rxcb->is_mcbc)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath12k_dp_rx_h_csum_offload(ar, msdu);
	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

/* Translate the PHY rate fields from the rx descriptor (pkt type, MCS,
 * NSS, BW, GI) into mac80211's ieee80211_rx_status encoding. Invalid
 * MCS values are warned about and leave rate fields partially set.
 */
static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	struct ath12k_base *ab = ar->ab;
	struct ieee80211_supported_band *sband;
	enum rx_msdu_start_pkt_type pkt_type;
	u8 bw;
	u8 rate_mcs, nss;
	u8 sgi;
	bool is_cck;

	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH12K_HT_MCS_MAX) {
			ath12k_warn(ar->ab,
				    "Received with invalid mcs in HT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
			ath12k_warn(ar->ab,
				    "Received with invalid mcs in VHT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH12K_HE_MCS_MAX) {
			ath12k_warn(ar->ab,
				    "Received with invalid mcs in HE mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	}
}

/* Fill the per-PPDU portion of ieee80211_rx_status: band/frequency
 * derived from the channel metadata in the rx descriptor (falling back
 * to ar->rx_channel when the channel number is not conclusive), then
 * the rate fields via ath12k_dp_rx_h_rate().
 */
void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
			 struct ieee80211_rx_status *rx_status)
{
	struct ath12k_base *ab = ar->ab;
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;
	rx_status->enc_flags = 0;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* low 16 bits: channel number, high 16 bits: center freq (MHz) */
	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	if (center_freq >= 5935 && center_freq <= 7105) {
		rx_status->band = NL80211_BAND_6GHZ;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(*rx_desc));
	}

	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
							 rx_status->band);

	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
}

/* Hand a fully-processed MSDU to mac80211: prepend a radiotap HE header
 * for HE frames, resolve the station, copy @status into the skb, mark
 * 802.3 fast-rx eligibility, and deliver via ieee80211_rx_napi().
 */
static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu,
				      struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_radiotap_he *he;
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_sta *pubsta;
	struct ath12k_peer *peer;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u8 decap = DP_RX_DECAP_TYPE_RAW;
	bool is_mcbc = rxcb->is_mcbc;
	bool is_eapol = rxcb->is_eapol;

	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_dp_rx_h_find_peer(ab, msdu);

	pubsta = peer ? peer->sta : NULL;

	spin_unlock_bh(&ab->base_lock);

	ath12k_dbg(ab, ATH12K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   peer ? peer->addr : NULL,
		   rxcb->tid,
		   is_mcbc ? "mcast" : "ucast",
		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	rx_status = IEEE80211_SKB_RXCB(msdu);
	*rx_status = *status;

	/* TODO: trace rx packet */

	/* PN for multicast packets are not validate in HW,
	 * so skip 802.3 rx path
	 * Also, fast_rx expects the STA to be authorized, hence
	 * eapol packets are sent in slow path.
	 */
	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
		rx_status->flag |= RX_FLAG_8023;

	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}

/* Process one MSDU from the rx ring (continues past this chunk). */
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list,
				     struct ieee80211_rx_status *rx_status)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct ath12k_skb_rxcb *rxcb;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u16 msdu_len;
	int ret;
	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath12k_warn(ab,
			    "No valid Rx buffer to access MSDU_END tlv\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH12K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len =
ath12k_dp_rx_h_msdu_len(ab, lrx_desc);2508l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);25092510if (rxcb->is_frag) {2511skb_pull(msdu, hal_rx_desc_sz);2512} else if (!rxcb->is_continuation) {2513if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {2514ret = -EINVAL;2515ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);2516ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,2517sizeof(*rx_desc));2518goto free_out;2519}2520skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);2521skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);2522} else {2523ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,2524msdu, last_buf,2525l3_pad_bytes, msdu_len);2526if (ret) {2527ath12k_warn(ab,2528"failed to coalesce msdu rx buffer%d\n", ret);2529goto free_out;2530}2531}25322533ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);2534ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);25352536rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;25372538return 0;25392540free_out:2541return ret;2542}25432544static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,2545struct napi_struct *napi,2546struct sk_buff_head *msdu_list,2547int ring_id)2548{2549struct ieee80211_rx_status rx_status = {0};2550struct ath12k_skb_rxcb *rxcb;2551struct sk_buff *msdu;2552struct ath12k *ar;2553u8 mac_id, pdev_id;2554int ret;25552556if (skb_queue_empty(msdu_list))2557return;25582559rcu_read_lock();25602561while ((msdu = __skb_dequeue(msdu_list))) {2562rxcb = ATH12K_SKB_RXCB(msdu);2563mac_id = rxcb->mac_id;2564pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);2565ar = ab->pdevs[pdev_id].ar;2566if (!rcu_dereference(ab->pdevs_active[pdev_id])) {2567dev_kfree_skb_any(msdu);2568continue;2569}25702571if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {2572dev_kfree_skb_any(msdu);2573continue;2574}25752576ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);2577if (ret) {2578ath12k_dbg(ab, ATH12K_DBG_DATA,2579"Unable to process msdu %d", 
ret);2580dev_kfree_skb_any(msdu);2581continue;2582}25832584ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);2585}25862587rcu_read_unlock();2588}25892590int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,2591struct napi_struct *napi, int budget)2592{2593struct ath12k_rx_desc_info *desc_info;2594struct ath12k_dp *dp = &ab->dp;2595struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;2596struct hal_reo_dest_ring *desc;2597int num_buffs_reaped = 0;2598struct sk_buff_head msdu_list;2599struct ath12k_skb_rxcb *rxcb;2600int total_msdu_reaped = 0;2601struct hal_srng *srng;2602struct sk_buff *msdu;2603bool done = false;2604int mac_id;2605u64 desc_va;26062607__skb_queue_head_init(&msdu_list);26082609srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];26102611spin_lock_bh(&srng->lock);26122613try_again:2614ath12k_hal_srng_access_begin(ab, srng);26152616while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {2617enum hal_reo_dest_ring_push_reason push_reason;2618u32 cookie;26192620cookie = le32_get_bits(desc->buf_addr_info.info1,2621BUFFER_ADDR_INFO1_SW_COOKIE);26222623mac_id = le32_get_bits(desc->info0,2624HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);26252626desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |2627le32_to_cpu(desc->buf_va_lo));2628desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);26292630/* retry manual desc retrieval */2631if (!desc_info) {2632desc_info = ath12k_dp_get_rx_desc(ab, cookie);2633if (!desc_info) {2634ath12k_warn(ab, "Invalid cookie in manual desc retrieval");2635continue;2636}2637}26382639if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)2640ath12k_warn(ab, "Check HW CC implementation");26412642msdu = desc_info->skb;2643desc_info->skb = NULL;26442645spin_lock_bh(&dp->rx_desc_lock);2646list_move_tail(&desc_info->list, &dp->rx_desc_free_list);2647spin_unlock_bh(&dp->rx_desc_lock);26482649rxcb = ATH12K_SKB_RXCB(msdu);2650dma_unmap_single(ab->dev, rxcb->paddr,2651msdu->len + 
skb_tailroom(msdu),2652DMA_FROM_DEVICE);26532654num_buffs_reaped++;26552656push_reason = le32_get_bits(desc->info0,2657HAL_REO_DEST_RING_INFO0_PUSH_REASON);2658if (push_reason !=2659HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {2660dev_kfree_skb_any(msdu);2661ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;2662continue;2663}26642665rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &2666RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);2667rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &2668RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);2669rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &2670RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);2671rxcb->mac_id = mac_id;2672rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,2673RX_MPDU_DESC_META_DATA_PEER_ID);2674rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,2675RX_MPDU_DESC_INFO0_TID);26762677__skb_queue_tail(&msdu_list, msdu);26782679if (!rxcb->is_continuation) {2680total_msdu_reaped++;2681done = true;2682} else {2683done = false;2684}26852686if (total_msdu_reaped >= budget)2687break;2688}26892690/* Hw might have updated the head pointer after we cached it.2691* In this case, even though there are entries in the ring we'll2692* get rx_desc NULL. Give the read another try with updated cached2693* head pointer so that we can reap complete MPDU in the current2694* rx processing.2695*/2696if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {2697ath12k_hal_srng_access_end(ab, srng);2698goto try_again;2699}27002701ath12k_hal_srng_access_end(ab, srng);27022703spin_unlock_bh(&srng->lock);27042705if (!total_msdu_reaped)2706goto exit;27072708/* TODO: Move to implicit BM? 
*/2709ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,2710ab->hw_params->hal_params->rx_buf_rbm, true);27112712ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,2713ring_id);27142715exit:2716return total_msdu_reaped;2717}27182719static void ath12k_dp_rx_frag_timer(struct timer_list *timer)2720{2721struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);27222723spin_lock_bh(&rx_tid->ab->base_lock);2724if (rx_tid->last_frag_no &&2725rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {2726spin_unlock_bh(&rx_tid->ab->base_lock);2727return;2728}2729ath12k_dp_rx_frags_cleanup(rx_tid, true);2730spin_unlock_bh(&rx_tid->ab->base_lock);2731}27322733int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)2734{2735struct ath12k_base *ab = ar->ab;2736struct crypto_shash *tfm;2737struct ath12k_peer *peer;2738struct ath12k_dp_rx_tid *rx_tid;2739int i;27402741tfm = crypto_alloc_shash("michael_mic", 0, 0);2742if (IS_ERR(tfm))2743return PTR_ERR(tfm);27442745spin_lock_bh(&ab->base_lock);27462747peer = ath12k_peer_find(ab, vdev_id, peer_mac);2748if (!peer) {2749spin_unlock_bh(&ab->base_lock);2750ath12k_warn(ab, "failed to find the peer to set up fragment info\n");2751return -ENOENT;2752}27532754for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {2755rx_tid = &peer->rx_tid[i];2756rx_tid->ab = ab;2757timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);2758skb_queue_head_init(&rx_tid->rx_frags);2759}27602761peer->tfm_mmic = tfm;2762spin_unlock_bh(&ab->base_lock);27632764return 0;2765}27662767static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,2768struct ieee80211_hdr *hdr, u8 *data,2769size_t data_len, u8 *mic)2770{2771SHASH_DESC_ON_STACK(desc, tfm);2772u8 mic_hdr[16] = {0};2773u8 tid = 0;2774int ret;27752776if (!tfm)2777return -EINVAL;27782779desc->tfm = tfm;27802781ret = crypto_shash_setkey(tfm, key, 8);2782if (ret)2783goto out;27842785ret = crypto_shash_init(desc);2786if (ret)2787goto 
out;

	/* TKIP MIC header */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	/* Wipe key material left in the on-stack shash descriptor */
	shash_desc_zero(desc);
	return ret;
}

/* Verify the TKIP Michael MIC of a reassembled MSDU. On mismatch the
 * frame is reported to mac80211 with RX_FLAG_MMIC_ERROR (so countermeasures
 * can run) and -EINVAL is returned; non-TKIP frames pass trivially.
 */
static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
					  struct sk_buff *msdu)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
	u8 *key, *data;
	u8 key_idx;

	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	/* NOTE(review): key_conf is not NULL-checked before use below;
	 * assumes a key is installed whenever enctype is TKIP — verify.
	 */
	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;

	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
	skb_pull(msdu, hal_rx_desc_sz);

	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
	ieee80211_rx(ar->hw, msdu);
	return -EINVAL;
}

/* Strip crypto trailer (MIC/ICV) and/or IV from a raw rx fragment as
 * indicated by @flags; @flags == 0 is a no-op.
 */
static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
					enum hal_encrypt_type enctype, u32 flags)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);

	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_mic_len(ar, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_icv_len(ar, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);

		/* Slide the 802.11 header forward over the IV, then drop
		 * the now-duplicated front bytes.
		 */
		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
			msdu->data + hal_rx_desc_sz, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

/* Reassemble the fragments queued on @rx_tid into a single MSDU.
 * Each fragment is stripped of FCS/crypto bytes as appropriate, later
 * fragments lose their headers, and all payloads are appended to the
 * first fragment (expanding its head room if needed). On success
 * *@defrag_skb holds the reassembled frame, or NULL if the TKIP MIC
 * check consumed it.
 */
static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
				 struct ath12k_peer *peer,
				 struct ath12k_dp_rx_tid *rx_tid,
				 struct sk_buff **defrag_skb)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc;
	struct sk_buff *skb, *first_frag, *last_frag;
	struct ieee80211_hdr *hdr;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	int msdu_len = 0;
	int extra_space;
	u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	first_frag = skb_peek(&rx_tid->rx_frags);
	last_frag = skb_peek_tail(&rx_tid->rx_frags);

	skb_queue_walk(&rx_tid->rx_frags, skb) {
		flags = 0;
		rx_desc = (struct hal_rx_desc *)skb->data;
		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);

		enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
			is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
								   rx_desc);

		/* Keep the IV on the first fragment and MIC/ICV on the
		 * last one; strip them everywhere else.
		 */
		if (is_decrypted) {
			if (skb != first_frag)
				flags |= RX_FLAG_IV_STRIPPED;
			if (skb != last_frag)
				flags |= RX_FLAG_ICV_STRIPPED |
					 RX_FLAG_MIC_STRIPPED;
		}

		/* RX fragments are always raw packets */
		if (skb != last_frag)
			skb_trim(skb, skb->len - FCS_LEN);
		ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

		if (skb != first_frag)
			skb_pull(skb, hal_rx_desc_sz +
				 ieee80211_hdrlen(hdr->frame_control));
		msdu_len += skb->len;
	}

	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
	if (extra_space > 0 &&
	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
		return -ENOMEM;

	__skb_unlink(first_frag, &rx_tid->rx_frags);
	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
		skb_put_data(first_frag, skb->data, skb->len);
		dev_kfree_skb_any(skb);
	}

	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
	ATH12K_SKB_RXCB(first_frag)->is_frag = 1;

	/* MIC failure path already delivered the frame to mac80211 */
	if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
		first_frag = NULL;

	*defrag_skb = first_frag;
	return 0;
}

/* Re-inject a reassembled MSDU into the REO entrance ring so the
 * hardware re-runs reorder/PN processing on it. Rewrites the first msdu
 * link descriptor and the hal rx descriptor length, DMA-maps the frame
 * and queues a new entrance-ring entry. Returns 0 on success; on
 * failure the DMA mapping and rx descriptor are rolled back.
 */
static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
					      struct ath12k_dp_rx_tid *rx_tid,
					      struct sk_buff *defrag_skb)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
	struct hal_reo_entrance_ring *reo_ent_ring;
	struct hal_reo_dest_ring *reo_dest_ring;
	struct dp_link_desc_bank *link_desc_banks;
	struct hal_rx_msdu_link *msdu_link;
	struct hal_rx_msdu_details *msdu0;
	struct hal_srng *srng;
	dma_addr_t link_paddr, buf_paddr;
	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
	u32 cookie, hal_rx_desc_sz, dest_ring_info0;
	int ret;
	struct ath12k_rx_desc_info *desc_info;
	u8 dst_ind;

	hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
	link_desc_banks = dp->link_desc_banks;
	reo_dest_ring = rx_tid->dst_ring_desc;

	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
					&link_paddr, &cookie);
	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);

#if defined(__linux__)
	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
			(link_paddr - link_desc_banks[desc_bank].paddr));
#elif defined(__FreeBSD__)
	msdu_link = (struct hal_rx_msdu_link *)((uintptr_t)link_desc_banks[desc_bank].vaddr +
			(link_paddr - link_desc_banks[desc_bank].paddr));
#endif
	msdu0 = &msdu_link->msdu_link[0];
	/* Preserve the REO destination indication before wiping msdu0 */
	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);

	memset(msdu0, 0, sizeof(*msdu0));

	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);

	/* change msdu len in hal rx desc */
	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);

	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
				   defrag_skb->len + skb_tailroom(defrag_skb),
				   DMA_FROM_DEVICE);
	if (dma_mapping_error(ab->dev, buf_paddr))
		return -ENOMEM;

	spin_lock_bh(&dp->rx_desc_lock);
	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
					     struct ath12k_rx_desc_info,
					     list);
	if (!desc_info) {
		spin_unlock_bh(&dp->rx_desc_lock);
		ath12k_warn(ab, "failed to find rx desc for reinject\n");
		ret = -ENOMEM;
		goto err_unmap_dma;
	}

	desc_info->skb = defrag_skb;

	list_del(&desc_info->list);
	list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
	spin_unlock_bh(&dp->rx_desc_lock);

	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;

	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
					desc_info->cookie,
					HAL_RX_BUF_RBM_SW3_BM);

	/* Fill mpdu details into reo entrace ring */
	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];

	spin_lock_bh(&srng->lock);
	ath12k_hal_srng_access_begin(ab, srng);

	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!reo_ent_ring) {
		ath12k_hal_srng_access_end(ab, srng);
		spin_unlock_bh(&srng->lock);
		ret = -ENOSPC;
		goto err_free_desc;
	}
	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
					cookie,
					HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);

	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);

	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
	reo_ent_ring->rx_mpdu_info.peer_meta_data =
		reo_dest_ring->rx_mpdu_info.peer_meta_data;

	/* Firmware expects physical address to be filled in queue_addr_lo in
	 * the MLO scenario and in case of non MLO peer meta data needs to be
	 * filled.
	 * TODO: Need to handle for MLO scenario.
	 */
	reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
	reo_ent_ring->info0 = le32_encode_bits(dst_ind,
					       HAL_REO_ENTR_RING_INFO0_DEST_IND);

	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
	reo_ent_ring->info2 =
		cpu_to_le32(u32_get_bits(dest_ring_info0,
					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));

	ath12k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return 0;

err_free_desc:
	spin_lock_bh(&dp->rx_desc_lock);
	list_del(&desc_info->list);
	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
	desc_info->skb = NULL;
	spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
			 DMA_FROM_DEVICE);
	return ret;
}

/* Order two fragments by fragment number (negative when a < b). */
static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
				    struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
	frag2 = ath12k_dp_rx_h_frag_no(ab, b);

	return frag1 - frag2;
}

/* Insert @cur_frag into @frag_list keeping fragment-number order. */
static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
				      struct sk_buff_head *frag_list,
				      struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
		if (cmp < 0)
			continue;
		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}
	__skb_queue_tail(frag_list, cur_frag);
}

/* Extract the 48-bit packet number from the IV bytes that follow the
 * 802.11 header (bytes 0,1,4..7 of the IV, per CCMP/GCMP layout).
 */
static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;
	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}

static bool
ath12k_dp_rx_h_defrag_validate_incr_pn(struct
ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)3159{3160struct ath12k_base *ab = ar->ab;3161enum hal_encrypt_type encrypt_type;3162struct sk_buff *first_frag, *skb;3163struct hal_rx_desc *desc;3164u64 last_pn;3165u64 cur_pn;31663167first_frag = skb_peek(&rx_tid->rx_frags);3168desc = (struct hal_rx_desc *)first_frag->data;31693170encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);3171if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&3172encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&3173encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&3174encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)3175return true;31763177last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);3178skb_queue_walk(&rx_tid->rx_frags, skb) {3179if (skb == first_frag)3180continue;31813182cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);3183if (cur_pn != last_pn + 1)3184return false;3185last_pn = cur_pn;3186}3187return true;3188}31893190static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,3191struct sk_buff *msdu,3192struct hal_reo_dest_ring *ring_desc)3193{3194struct ath12k_base *ab = ar->ab;3195struct hal_rx_desc *rx_desc;3196struct ath12k_peer *peer;3197struct ath12k_dp_rx_tid *rx_tid;3198struct sk_buff *defrag_skb = NULL;3199u32 peer_id;3200u16 seqno, frag_no;3201u8 tid;3202int ret = 0;3203bool more_frags;32043205rx_desc = (struct hal_rx_desc *)msdu->data;3206peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);3207tid = ath12k_dp_rx_h_tid(ab, rx_desc);3208seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);3209frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);3210more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);32113212if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||3213!ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||3214tid > IEEE80211_NUM_TIDS)3215return -EINVAL;32163217/* received unfragmented packet in reo3218* exception ring, this shouldn't happen3219* as these packets typically come from3220* reo2sw srngs.3221*/3222if (WARN_ON_ONCE(!frag_no && !more_frags))3223return -EINVAL;32243225spin_lock_bh(&ab->base_lock);3226peer = ath12k_peer_find_by_id(ab, 
peer_id);3227if (!peer) {3228ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",3229peer_id);3230ret = -ENOENT;3231goto out_unlock;3232}3233rx_tid = &peer->rx_tid[tid];32343235if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||3236skb_queue_empty(&rx_tid->rx_frags)) {3237/* Flush stored fragments and start a new sequence */3238ath12k_dp_rx_frags_cleanup(rx_tid, true);3239rx_tid->cur_sn = seqno;3240}32413242if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {3243/* Fragment already present */3244ret = -EINVAL;3245goto out_unlock;3246}32473248if (frag_no > __fls(rx_tid->rx_frag_bitmap))3249__skb_queue_tail(&rx_tid->rx_frags, msdu);3250else3251ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);32523253rx_tid->rx_frag_bitmap |= BIT(frag_no);3254if (!more_frags)3255rx_tid->last_frag_no = frag_no;32563257if (frag_no == 0) {3258rx_tid->dst_ring_desc = kmemdup(ring_desc,3259sizeof(*rx_tid->dst_ring_desc),3260GFP_ATOMIC);3261if (!rx_tid->dst_ring_desc) {3262ret = -ENOMEM;3263goto out_unlock;3264}3265} else {3266ath12k_dp_rx_link_desc_return(ab, ring_desc,3267HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);3268}32693270if (!rx_tid->last_frag_no ||3271rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {3272mod_timer(&rx_tid->frag_timer, jiffies +3273ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);3274goto out_unlock;3275}32763277spin_unlock_bh(&ab->base_lock);3278del_timer_sync(&rx_tid->frag_timer);3279spin_lock_bh(&ab->base_lock);32803281peer = ath12k_peer_find_by_id(ab, peer_id);3282if (!peer)3283goto err_frags_cleanup;32843285if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))3286goto err_frags_cleanup;32873288if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))3289goto err_frags_cleanup;32903291if (!defrag_skb)3292goto err_frags_cleanup;32933294if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))3295goto err_frags_cleanup;32963297ath12k_dp_rx_frags_cleanup(rx_tid, false);3298goto 
out_unlock;32993300err_frags_cleanup:3301dev_kfree_skb_any(defrag_skb);3302ath12k_dp_rx_frags_cleanup(rx_tid, true);3303out_unlock:3304spin_unlock_bh(&ab->base_lock);3305return ret;3306}33073308static int3309ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,3310bool drop, u32 cookie)3311{3312struct ath12k_base *ab = ar->ab;3313struct sk_buff *msdu;3314struct ath12k_skb_rxcb *rxcb;3315struct hal_rx_desc *rx_desc;3316u16 msdu_len;3317u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;3318struct ath12k_rx_desc_info *desc_info;3319u64 desc_va;33203321desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |3322le32_to_cpu(desc->buf_va_lo));3323desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);33243325/* retry manual desc retrieval */3326if (!desc_info) {3327desc_info = ath12k_dp_get_rx_desc(ab, cookie);3328if (!desc_info) {3329ath12k_warn(ab, "Invalid cookie in manual desc retrieval");3330return -EINVAL;3331}3332}33333334if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)3335ath12k_warn(ab, " RX Exception, Check HW CC implementation");33363337msdu = desc_info->skb;3338desc_info->skb = NULL;3339spin_lock_bh(&ab->dp.rx_desc_lock);3340list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);3341spin_unlock_bh(&ab->dp.rx_desc_lock);33423343rxcb = ATH12K_SKB_RXCB(msdu);3344dma_unmap_single(ar->ab->dev, rxcb->paddr,3345msdu->len + skb_tailroom(msdu),3346DMA_FROM_DEVICE);33473348if (drop) {3349dev_kfree_skb_any(msdu);3350return 0;3351}33523353rcu_read_lock();3354if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {3355dev_kfree_skb_any(msdu);3356goto exit;3357}33583359if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {3360dev_kfree_skb_any(msdu);3361goto exit;3362}33633364rx_desc = (struct hal_rx_desc *)msdu->data;3365msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);3366if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {3367ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);3368ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, 
			"", rx_desc, sizeof(*rx_desc));
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	skb_put(msdu, hal_rx_desc_sz + msdu_len);

	/* Fragment handling failed: free the msdu and hand the link
	 * descriptor back to the WBM idle list for hardware reuse.
	 */
	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
		dev_kfree_skb_any(msdu);
		ath12k_dp_rx_link_desc_return(ar->ab, desc,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
	}
exit:
	rcu_read_unlock();
	return 0;
}

/* Reap up to @budget entries from the REO exception ring. Only rx
 * fragments carried in a link descriptor with a single MSDU are processed
 * further (via ath12k_dp_process_rx_err_buf()); all other MSDUs are
 * dropped and their link descriptors returned to the WBM idle list.
 * Reaped rx buffers are replenished before returning.
 *
 * Returns the number of rx buffers reaped (capped at @budget).
 */
int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
			     int budget)
{
	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
	struct dp_link_desc_bank *link_desc_banks;
	enum hal_rx_buf_return_buf_manager rbm;
	struct hal_rx_msdu_link *link_desc_va;
	int tot_n_bufs_reaped, quota, ret, i;
	struct hal_reo_dest_ring *reo_desc;
	struct dp_rxdma_ring *rx_ring;
	struct dp_srng *reo_except;
	u32 desc_bank, num_msdus;
	struct hal_srng *srng;
	struct ath12k_dp *dp;
	int mac_id;
	struct ath12k *ar;
	dma_addr_t paddr;
	bool is_frag;
	bool drop = false;
	int pdev_id;

	tot_n_bufs_reaped = 0;
	quota = budget;

	dp = &ab->dp;
	reo_except = &dp->reo_except_ring;
	link_desc_banks = dp->link_desc_banks;

	srng = &ab->hal.srng_list[reo_except->ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget &&
	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		ab->soc_stats.err_ring_pkts++;
		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
						    &desc_bank);
		if (ret) {
			ath12k_warn(ab, "failed to parse error reo desc %d\n",
				    ret);
			continue;
		}
		/* Translate the link descriptor's bus address to its CPU
		 * mapping within the owning descriptor bank.
		 */
#if defined(__linux__)
		link_desc_va = link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr);
#elif defined(__FreeBSD__)
		link_desc_va = (void *)((uintptr_t)link_desc_banks[desc_bank].vaddr +
			       (paddr - link_desc_banks[desc_bank].paddr));
#endif
		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
						 &rbm);
		if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
			ab->soc_stats.invalid_rbm++;
			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath12k_dp_rx_link_desc_return(ab, reo_desc,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
			     RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Process only rx fragments with one msdu per link desc below, and drop
		 * msdu's indicated due to error reasons.
		 *
		 * NOTE(review): 'drop' is declared outside the loop and never
		 * reset to false, so once one non-fragment entry is seen every
		 * following MSDU in this poll is also dropped — confirm this is
		 * intended.
		 */
		if (!is_frag || num_msdus > 1) {
			drop = true;
			/* Return the link desc back to wbm idle list */
			ath12k_dp_rx_link_desc_return(ab, reo_desc,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			mac_id = le32_get_bits(reo_desc->info0,
					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);

			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
			ar = ab->pdevs[pdev_id].ar;

			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
							  msdu_cookies[i]))
				tot_n_bufs_reaped++;
		}

		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	rx_ring = &dp->rx_refill_buf_ring;

	/* Refill the rx buffer ring with as many buffers as were consumed */
	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
				    ab->hw_params->hal_params->rx_buf_rbm, true);

	return tot_n_bufs_reaped;
}

/* Drop the scatter-gather continuation buffers of an oversized MSDU.
 * Frees up to DIV_ROUND_UP(msdu_len, usable-buffer-size) skbs from
 * @msdu_list that carry the REO DESC_ADDR_ZERO error code.
 */
static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath12k_skb_rxcb *rxcb;
	int n_buffs;

	/* Number of buffers the remaining msdu_len spans; each buffer holds
	 * DP_RX_BUFFER_SIZE minus the hal rx descriptor.
	 */
	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

/* Recover an MSDU that hit a NULL REO queue descriptor and prepare it for
 * delivery to mac80211. Returns 0 on success, negative errno when the MSDU
 * must be dropped by the caller.
 */
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
				      struct ieee80211_rx_status *status,
				      struct sk_buff_head *msdu_list)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len, peer_id;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
	peer_id = ath12k_dp_rx_h_peer_id(ab, desc);

	/* Reject frames from peers we do not know about */
	spin_lock(&ab->base_lock);
	if (!ath12k_peer_find_by_id(ab, peer_id)) {
		spin_unlock(&ab->base_lock);
		ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n",
			   peer_id);
		return -EINVAL;
	}
	spin_unlock(&ab->base_lock);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	/* Even after cleaning up the sg buffers in the msdu list with above check
	 * any msdu received with continuation flag needs to be dropped as invalid.
	 * This protects against some random err frame with continuation flag.
	 */
	if (rxcb->is_continuation)
		return -EINVAL;

	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
		ath12k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out a missing
	 * REO queue for a given peer or a given TID. This typically
	 * may happen if a packet is received on a QOS enabled TID before the
	 * ADDBA negotiation for that TID, when the TID queue is setup. Or
	 * it may also happen for MC/BC frames if they are not routed to the
	 * non-QOS TID queue, in the absence of any other default TID queue.
	 * This error can show up both in a REO destination or WBM release ring.
	 */

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		/* Trim the skb to the payload: grow to full frame, then strip
		 * the hal descriptor and L3 padding.
		 */
		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	ath12k_dp_rx_h_ppdu(ar, desc, status);

	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);

	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);

	/* Please note that caller will have access to msdu and complete
	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
	 */

	return 0;
}

/* Dispatch a REO-sourced error MSDU by rxcb->err_code.
 * Returns true when the caller should drop (free) the msdu.
 */
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
				   struct ieee80211_rx_status *status,
				   struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->soc_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is good to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Prepare a TKIP MIC failure frame for mac80211: trim to the payload, fill
 * PPDU info, flag the MIC error and undecap. mac80211 handles the
 * countermeasures once it sees RX_FLAG_MMIC_ERROR.
 */
static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
					struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;

	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);

	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	ath12k_dp_rx_h_ppdu(ar, desc, status);

	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
			 RX_FLAG_DECRYPTED);

	ath12k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
}

/* Dispatch a RXDMA-sourced error MSDU by rxcb->err_code. Only TKIP MIC
 * failures are reported up; everything else is dropped.
 * Returns true when the caller should drop (free) the msdu.
 */
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
				     struct ieee80211_rx_status *status)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	bool drop = false;
	u32 err_bitmap;

	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
			ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
			break;
		}
		fallthrough;
	default:
		/* TODO: Review other rxdma error code to check if anything is
		 * worth reporting to mac80211
		 */
		drop = true;
		break;
	}

	return drop;
}

/* Route a WBM error-release MSDU to the REO or RXDMA error handler and
 * either free it or deliver it to mac80211.
 */
static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	bool drop = true;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}

/* Reap the WBM error release ring: unmap each reaped buffer, bucket the
 * error MSDUs per radio, replenish the rx ring, then process the per-radio
 * lists (purging them if the pdev is inactive or CAC is running).
 * Returns the number of buffers reaped.
 */
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list[MAX_RADIOS];
	struct ath12k_skb_rxcb *rxcb;
	void *rx_desc;
	int mac_id;
	int num_buffs_reaped = 0;
	struct ath12k_rx_desc_info *desc_info;
	int ret, i;

	for (i = 0; i < ab->num_radios; i++)
		__skb_queue_head_init(&msdu_list[i]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
	rx_ring = &dp->rx_refill_buf_ring;

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath12k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;

		/* retry manual desc retrieval if hw cc is not done */
		if (!desc_info) {
			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
			if (!desc_info) {
				ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
				continue;
			}
		}

		/* FIXME: Extract mac id correctly. Since descs are not tied
		 * to mac, we can extract from vdev id in ring desc.
		 */
		mac_id = 0;

		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "WBM RX err, Check HW CC implementation");

		msdu = desc_info->skb;
		desc_info->skb = NULL;

		/* Return the software rx descriptor to the free list */
		spin_lock_bh(&dp->rx_desc_lock);
		list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
		spin_unlock_bh(&dp->rx_desc_lock);

		rxcb = ATH12K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped++;

		/* Only count complete MSDUs against the budget;
		 * continuation buffers belong to the same MSDU.
		 */
		if (!err_info.continuation)
			budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
		__skb_queue_tail(&msdu_list[mac_id], msdu);

		rxcb->is_first_msdu = err_info.first_msdu;
		rxcb->is_last_msdu = err_info.last_msdu;
		rxcb->is_continuation = err_info.continuation;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!num_buffs_reaped)
		goto done;

	ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
				    ab->hw_params->hal_params->rx_buf_rbm, true);

	rcu_read_lock();
	for (i = 0; i < ab->num_radios; i++) {
		if (!rcu_dereference(ab->pdevs_active[i])) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		ar = ab->pdevs[i].ar;

		/* Do not deliver frames while channel availability check runs */
		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
			__skb_queue_purge(&msdu_list[i]);
			continue;
		}

		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
			ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
	}
	rcu_read_unlock();
done:
	return num_buffs_reaped;
}

/* Drain the REO status ring, decode each status TLV and complete the
 * matching command from dp->reo_cmd_list by invoking its handler.
 */
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tlv_64_hdr *hdr;
	struct hal_srng *srng;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	bool found = false;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath12k_hal_reo_status_queue_stats(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath12k_hal_reo_flush_queue_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath12k_hal_reo_flush_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath12k_hal_reo_unblk_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
								  &reo_status);
			break;
		default:
			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		/* Match the status against a pending REO command by number */
		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

/* Tear down all rx srngs and free the rxdma buffers allocated by
 * ath12k_dp_rx_alloc(). Counterpart of ath12k_dp_rx_alloc().
 */
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
	ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

/* Free the per-pdev rx srngs for the given mac. */
void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}

/* Program the QCN9274 rxdma TLV filter: subscribe to BAR control frames and
 * mcast/ucast/NULL data, with packet and TLV offsets taken from the hal ops.
 */
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret;
	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_mpdu_start_offset =
		ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();

	/* TODO: Selectively subscribe to required qwords within msdu_end
	 * and mpdu_start and setup the mask in below msg
	 * and modify the rx_desc struct
	 */
	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
					       HAL_RXDMA_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);

	return ret;
}

/* Program the WCN7850 rxdma TLV filter on each per-pdev rx mac buf ring.
 * Same filter as QCN9274 plus the packet header TLV offset.
 *
 * NOTE(review): only the last loop iteration's return value is checked;
 * an earlier ring's setup failure is silently overwritten — confirm
 * intended.
 */
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret;
	u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
	int i;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);

	tlv_filter.rx_mpdu_start_offset =
		ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();

	/* TODO: Selectively subscribe to required qwords within msdu_end
	 * and mpdu_start and setup the mask in below msg
	 * and modify the rx_desc struct
	 */

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mac_buf_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
						       HAL_RXDMA_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	return ret;
}

/* Register all rx-related srngs with the firmware over HTT and apply the
 * hw-specific rxdma ring selection config. Returns 0 or a negative errno
 * from the first failing setup.
 */
int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 ring_id;
	int i, ret;

	/* TODO: Need to verify the HTT setup for QCN9224 */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
							  i, HAL_RXDMA_BUF);
			if (ret) {
				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  i, HAL_RXDMA_DST);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}

		ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_TX_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}
	}

	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
		return ret;
	}

	return 0;
}

/* Allocate all rx-related srngs (refill, per-pdev mac buf, error dst and,
 * when rxdma1 is enabled, monitor buf rings) and set up the rxdma buffers.
 * Counterpart of ath12k_dp_rx_free().
 */
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i, ret;

	idr_init(&dp->rx_refill_buf_ring.bufs_idr);
	spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);

	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);

	idr_init(&dp->tx_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);

	ret = ath12k_dp_srng_setup(ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0, 0,
				   DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
			ret = ath12k_dp_srng_setup(ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   i, 1024);
			if (ret) {
				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_srng_setup(ab,
					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
					   HAL_RXDMA_MONITOR_BUF, 0, 0,
					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}

		ret = ath12k_dp_srng_setup(ab,
					   &dp->tx_mon_buf_ring.refill_buf_ring,
					   HAL_TX_MONITOR_BUF, 0, 0,
					   DP_TX_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
			return ret;
		}
	}

	ret = ath12k_dp_rxdma_buf_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	return 0;
}

/* Per-pdev rx setup: allocate the pdev srngs and register the rx/tx
 * monitor destination rings over HTT. No-op when rxdma1 is disabled.
 */
int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;
	struct ath12k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	if (!ab->hw_params->rxdma1_enable)
		goto out;

	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_RXDMA_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure rxdma_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}

		ring_id = dp->tx_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_TX_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure tx_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}
	}
out:
	return 0;
}

/* Initialize the monitor-status bookkeeping for a pdev. Always returns 0. */
static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

/* Attach the rx monitor for a pdev: status bookkeeping always, plus
 * link-desc tracking state when rxdma1 is enabled.
 */
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret = 0;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	pmon->mon_last_linkdesc_paddr = 0;
	/* Sentinel: one past the maximum valid cookie */
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}

/* Start pktlog collection by arming the monitor reap timer. */
int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));

	return 0;
}

/* Stop pktlog collection: optionally cancel the reap timer, then drain the
 * monitor rings. Returns 0 or the error from the purge.
 */
int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath12k_dp_purge_mon_ring(ab);
	if (ret) {
		ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}