Path: blob/main/sys/contrib/dev/athk/ath10k/htt_tx.c
48375 views
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

/*
 * Encode a tx queue depth (in bytes) into the compact exponent/factor
 * byte used in the firmware-shared HTT tx queue state record.
 * Returns 0xff when the depth is too large to encode (saturated).
 */
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	/* scale the factor down by 8 per exponent step until it fits in
	 * the record's factor field (< 64), up to 4 steps
	 */
	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	/* a non-empty queue must never encode as zero */
	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

/*
 * Recompute the firmware-shared queue state entry for @txq (depth byte
 * plus per-peer bitmap bit). Caller must hold htt.tx_lock. The change is
 * only made visible to the device by __ath10k_htt_tx_txq_sync().
 */
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	/* station txqs use the station's peer id; vif txqs (e.g. mcast)
	 * fall back to the vif's own peer id
	 */
	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
		   peer_id, tid, count);
}

/*
 * Bump the queue-state sequence number and DMA-sync the whole state area
 * to the device so firmware picks up pending recalc updates.
 * Caller must hold htt.tx_lock.
 */
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

/* Locked wrapper: recalc one txq's state without committing to device. */
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

/* Locked wrapper: commit previously recalculated state to the device. */
void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

/* Locked wrapper: recalc one txq's state and commit in one step. */
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

/*
 * Drop one in-flight tx credit; unpauses mac80211 queues when we drop
 * back below the limit and wakes waiters when the last frame completes.
 * Caller must hold htt->tx_lock.
 */
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
}

/*
 * Take one in-flight tx credit. Returns -EBUSY when the hardware queue
 * is full; pauses mac80211 queues when the limit is reached.
 * Caller must hold htt->tx_lock.
 */
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

/*
 * Account one pending management frame. Only enforced when the hardware
 * advertises a probe-response descriptor threshold; probe responses are
 * refused (-EBUSY) once the threshold is exceeded.
 * Caller must hold htt->tx_lock.
 */
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

/* Undo ath10k_htt_tx_mgmt_inc_pending(); caller holds htt->tx_lock. */
void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

/*
 * Allocate an msdu id for @skb from the pending-tx IDR.
 * Returns the id (>= 0) or a negative errno.
 */
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

/* Release an msdu id back to the IDR; caller must hold htt->tx_lock. */
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

/* Free the 32-bit contiguous tx descriptor buffer, if allocated. */
static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

/* Allocate one coherent tx descriptor buffer per pending msdu (32-bit). */
static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return
-ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

/* Free the 64-bit contiguous tx descriptor buffer, if allocated. */
static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

/* Allocate one coherent tx descriptor buffer per pending msdu (64-bit). */
static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

/* Free the 32-bit contiguous fragment descriptor area, if allocated. */
static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

/*
 * Allocate the 32-bit contiguous fragment descriptor area (one
 * htt_msdu_ext_desc per pending msdu). No-op unless the hardware uses
 * continuous fragment descriptors.
 */
static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

/* Free the 64-bit contiguous fragment descriptor area, if allocated. */
static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

/*
 * Allocate the 64-bit contiguous fragment descriptor area. No-op unless
 * the hardware uses continuous fragment descriptors.
 */
static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

/*
 * Unmap and free the firmware-shared tx queue state area. Only relevant
 * when the running firmware supports peer flow control.
 */
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

/*
 * Allocate and DMA-map the firmware-shared tx queue state area. Only
 * relevant when the running firmware supports peer flow control.
 */
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

/* Free the tx-completion kfifo; it is expected to be empty by now. */
static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt
*htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

/*
 * Allocate the tx-completion kfifo, sized to the next power of two of
 * the maximum number of pending tx frames (kfifo requirement).
 */
static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

/*
 * Allocate all HTT tx memory: descriptor buffers, fragment descriptors,
 * queue state area and txdone fifo. On failure everything allocated so
 * far is rolled back (goto-based cleanup).
 */
static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

/*
 * Initialize HTT tx state (lock, msdu-id IDR) and allocate tx memory.
 * Memory allocation is skipped if already done or on high-latency (HL)
 * buses, which do not use host tx descriptors.
 */
int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

/*
 * idr_for_each() callback: complete a still-pending msdu with DISCARD
 * status so its resources are released during teardown.
 */
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

/* Release all HTT tx memory allocated by ath10k_htt_tx_start(). */
void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

/* Stop HL HTC tx and force-complete every still-pending msdu. */
static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
	ath10k_htc_stop_hl(htt->ar);
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

/* Flush pending tx and tear down the msdu-id IDR. */
void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	ath10k_htt_flush_tx_queue(htt);
	idr_destroy(&htt->pending_tx);
}

/* Full HTT tx teardown: stop, then free all tx memory. */
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

/* HTC credits became available: kick the bundle tx worker. */
void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
	queue_work(ar->workqueue, &ar->bundle_tx_work);
}

/*
 * HTC send-completion hook for HTT tx commands. When tx completions are
 * disabled (disable_tx_comp), a TX_FRM send completion is the only
 * signal we get, so synthesize an ACK completion here unless the
 * descriptor requested a real completion event.
 */
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};
	struct htt_cmd_hdr *htt_hdr;
	struct htt_data_tx_desc *desc_hdr = NULL;
	u16 flags1 = 0;
	u8 msg_type = 0;

	if (htt->disable_tx_comp) {
		htt_hdr = (struct htt_cmd_hdr *)skb->data;
		msg_type = htt_hdr->msg_type;

		if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
			desc_hdr = (struct htt_data_tx_desc *)
				(skb->data + sizeof(*htt_hdr));
			flags1 = __le16_to_cpu(desc_hdr->flags1);
			skb_pull(skb, sizeof(struct htt_cmd_hdr));
			skb_pull(skb, sizeof(struct htt_data_tx_desc));
		}
	}

	dev_kfree_skb_any(skb);

	if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
		return;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx complete msdu id:%u ,flags1:%x\n",
		   __le16_to_cpu(desc_hdr->id), flags1);

	/* the descriptor asked for an explicit completion event; do not
	 * synthesize one here
	 */
	if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
		return;

	tx_done.status = HTT_TX_COMPL_STATE_ACK;
	tx_done.msdu_id =
__le16_to_cpu(desc_hdr->id);
	ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

/* HIF send-completion hook: just release the skb. */
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

/* Send the HTT version request command to firmware. */
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * Send an HTT stats request. @mask/@reset_mask select the stat types to
 * upload/reset; @cookie is echoed back by firmware in the response.
 */
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 24 bit masks so no need to worry
	 * about endian support
	 */
	memcpy(req->upload_types, &mask, 3);
	memcpy(req->reset_types, &reset_mask, 3);
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * Tell firmware where the (32-bit addressed) fragment descriptor bank
 * and the shared tx queue state live. No-op unless the hardware uses
 * continuous fragment descriptors.
 */
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * 64-bit addressed variant of ath10k_htt_send_frag_desc_bank_cfg_32().
 */
static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/* Fill the per-hardware rx descriptor offsets into a 32-bit setup ring. */
static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
		(struct htt_rx_ring_setup_ring32 *)rx_ring;

	ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}

/* Fill the per-hardware rx descriptor offsets into a 64-bit setup ring. */
static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
		(struct htt_rx_ring_setup_ring64 *)rx_ring;

	ath10k_htt_rx_desc_get_offsets(hw,
&ring->offsets);
}

/*
 * Send the HTT rx ring setup command (32-bit addressing): describes the
 * host rx buffer ring, the fw index shadow register and which rx info
 * units firmware should deliver.
 */
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/* 64-bit addressed variant of ath10k_htt_send_rx_ring_cfg_32(). */
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * High-latency (HL) variant of the rx ring setup: no host ring memory,
 * so only a minimal, mostly-zeroed configuration is sent.
 */
static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * Send the (v1) HTT aggregation configuration: maximum A-MPDU and
 * A-MSDU subframe counts. Values are range-checked against firmware
 * limits before sending.
 */
static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if
(max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * v2 variant of the HTT aggregation configuration command; same limits
 * and semantics as ath10k_htt_h2t_aggr_cfg_msg_32() but using the
 * htt_aggr_conf_v2 payload layout.
 */
static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf_v2 *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf_v2);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf_v2;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

/*
 * Reply to a firmware TX_FETCH_IND with the given fetch records, echoing
 * back @token and @fetch_seq_num.
 */
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echo-ed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

/*
 * Pick the vdev id for a frame: scan vdev for off-channel tx, the
 * owning vif's vdev if known, the monitor vdev if active, else 0.
 */
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

/*
 * Derive the HTT extended TID for a frame: management TID for 802.11
 * management frames, the QoS TID for QoS data, otherwise the
 * non-QoS/mcast/bcast TID.
 */
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

/*
 * Transmit a management frame via the HTT MGMT_TX command: reserve an
 * msdu id, reserve MIC tailroom for protected robust mgmt frames, DMA
 * map the frame and hand a descriptor to HTC.
 */
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	const u8 *peer_addr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	/* protected robust management frames get firmware-added MIC;
	 * reserve tailroom for it here
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			skb_put(msdu, sizeof(struct ieee80211_mmie_16));
		} else {
			if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
				skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
			else
				skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr =
__cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	/* HL buses never mapped the msdu above, so do not unmap there */
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

/* Headroom needed to prepend HTC + HTT headers to an HL data frame. */
#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

/*
 * High-latency (HL) data tx path: build the HTT TX_FRM headers directly
 * in the msdu's headroom (reallocating the skb when headroom is short)
 * and submit via HTC. No host DMA descriptors are used.
 */
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;
	u16 msdu_id = 0;

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		/* reserve MIC tailroom for protected robust mgmt frames */
		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		if (htt->disable_tx_comp)
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	if (ar->bus_param.hl_msdu_ids) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
		res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
		if (res < 0) {
			ath10k_err(ar, "msdu_id allocation failed %d\n", res);
			goto out;
		}
		msdu_id = res;
	}

	/* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
	 * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
	 * reference by one to avoid a use-after-free case and a double
	 * free.
	 */
	skb_get(msdu);

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = __cpu_to_le16(msdu_id);
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * Reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

/*
 * Low-latency 32-bit data tx path (definition continues beyond this
 * chunk of the file).
 */
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar,
msdu);1399u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);1400int prefetch_len;1401int res;1402u8 flags0 = 0;1403u16 msdu_id, flags1 = 0;1404u16 freq = 0;1405u32 frags_paddr = 0;1406u32 txbuf_paddr;1407struct htt_msdu_ext_desc *ext_desc = NULL;1408struct htt_msdu_ext_desc *ext_desc_t = NULL;14091410res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);1411if (res < 0)1412goto err;14131414msdu_id = res;14151416prefetch_len = min(htt->prefetch_len, msdu->len);1417prefetch_len = roundup(prefetch_len, 4);14181419txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;1420txbuf_paddr = htt->txbuf.paddr +1421(sizeof(struct ath10k_htt_txbuf_32) * msdu_id);14221423if (!is_eth) {1424struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;14251426if ((ieee80211_is_action(hdr->frame_control) ||1427ieee80211_is_deauth(hdr->frame_control) ||1428ieee80211_is_disassoc(hdr->frame_control)) &&1429ieee80211_has_protected(hdr->frame_control)) {1430skb_put(msdu, IEEE80211_CCMP_MIC_LEN);1431} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&1432txmode == ATH10K_HW_TXRX_RAW &&1433ieee80211_has_protected(hdr->frame_control)) {1434skb_put(msdu, IEEE80211_CCMP_MIC_LEN);1435}1436}14371438skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,1439DMA_TO_DEVICE);1440res = dma_mapping_error(dev, skb_cb->paddr);1441if (res) {1442res = -EIO;1443goto err_free_msdu_id;1444}14451446if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))1447freq = ar->scan.roc_freq;14481449switch (txmode) {1450case ATH10K_HW_TXRX_RAW:1451case ATH10K_HW_TXRX_NATIVE_WIFI:1452flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;1453fallthrough;1454case ATH10K_HW_TXRX_ETHERNET:1455if (ar->hw_params.continuous_frag_desc) {1456ext_desc_t = htt->frag_desc.vaddr_desc_32;1457memset(&ext_desc_t[msdu_id], 0,1458sizeof(struct htt_msdu_ext_desc));1459frags = (struct htt_data_tx_desc_frag *)1460&ext_desc_t[msdu_id].frags;1461ext_desc = &ext_desc_t[msdu_id];1462frags[0].tword_addr.paddr_lo 
=1463__cpu_to_le32(skb_cb->paddr);1464frags[0].tword_addr.paddr_hi = 0;1465frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);14661467frags_paddr = htt->frag_desc.paddr +1468(sizeof(struct htt_msdu_ext_desc) * msdu_id);1469} else {1470frags = txbuf->frags;1471frags[0].dword_addr.paddr =1472__cpu_to_le32(skb_cb->paddr);1473frags[0].dword_addr.len = __cpu_to_le32(msdu->len);1474frags[1].dword_addr.paddr = 0;1475frags[1].dword_addr.len = 0;14761477frags_paddr = txbuf_paddr;1478}1479flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);1480break;1481case ATH10K_HW_TXRX_MGMT:1482flags0 |= SM(ATH10K_HW_TXRX_MGMT,1483HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);1484flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;14851486frags_paddr = skb_cb->paddr;1487break;1488}14891490/* Normally all commands go through HTC which manages tx credits for1491* each endpoint and notifies when tx is completed.1492*1493* HTT endpoint is creditless so there's no need to care about HTC1494* flags. In that case it is trivial to fill the HTC header here.1495*1496* MSDU transmission is considered completed upon HTT event. This1497* implies no relevant resources can be freed until after the event is1498* received. That's why HTC tx completion handler itself is ignored by1499* setting NULL to transfer_context for all sg items.1500*1501* There is simply no point in pushing HTT TX_FRM through HTC tx path1502* as it's a waste of resources. 
By bypassing HTC it is possible to1503* avoid extra memory allocations, compress data structures and thus1504* improve performance.1505*/15061507txbuf->htc_hdr.eid = htt->eid;1508txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +1509sizeof(txbuf->cmd_tx) +1510prefetch_len);1511txbuf->htc_hdr.flags = 0;15121513if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)1514flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;15151516flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);1517flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);1518if (msdu->ip_summed == CHECKSUM_PARTIAL &&1519!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {1520flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;1521flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;1522if (ar->hw_params.continuous_frag_desc)1523ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;1524}15251526/* Prevent firmware from sending up tx inspection requests. There's1527* nothing ath10k can do with frames requested for inspection so force1528* it to simply rely a regular tx completion with discard status.1529*/1530flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;15311532txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;1533txbuf->cmd_tx.flags0 = flags0;1534txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);1535txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);1536txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);1537txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);1538if (ath10k_mac_tx_frm_has_freq(ar)) {1539txbuf->cmd_tx.offchan_tx.peerid =1540__cpu_to_le16(HTT_INVALID_PEERID);1541txbuf->cmd_tx.offchan_tx.freq =1542__cpu_to_le16(freq);1543} else {1544txbuf->cmd_tx.peerid =1545__cpu_to_le32(HTT_INVALID_PEERID);1546}15471548trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);1549ath10k_dbg(ar, ATH10K_DBG_HTT,1550"htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",1551flags0, flags1, msdu->len, msdu_id, &frags_paddr,1552&skb_cb->paddr, vdev_id, tid, freq);1553ath10k_dbg_dump(ar, 
ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",1554msdu->data, msdu->len);1555trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);1556trace_ath10k_tx_payload(ar, msdu->data, msdu->len);15571558sg_items[0].transfer_id = 0;1559sg_items[0].transfer_context = NULL;1560sg_items[0].vaddr = &txbuf->htc_hdr;1561sg_items[0].paddr = txbuf_paddr +1562sizeof(txbuf->frags);1563sg_items[0].len = sizeof(txbuf->htc_hdr) +1564sizeof(txbuf->cmd_hdr) +1565sizeof(txbuf->cmd_tx);15661567sg_items[1].transfer_id = 0;1568sg_items[1].transfer_context = NULL;1569sg_items[1].vaddr = msdu->data;1570sg_items[1].paddr = skb_cb->paddr;1571sg_items[1].len = prefetch_len;15721573res = ath10k_hif_tx_sg(htt->ar,1574htt->ar->htc.endpoint[htt->eid].ul_pipe_id,1575sg_items, ARRAY_SIZE(sg_items));1576if (res)1577goto err_unmap_msdu;15781579return 0;15801581err_unmap_msdu:1582dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);1583err_free_msdu_id:1584spin_lock_bh(&htt->tx_lock);1585ath10k_htt_tx_free_msdu_id(htt, msdu_id);1586spin_unlock_bh(&htt->tx_lock);1587err:1588return res;1589}15901591static int ath10k_htt_tx_64(struct ath10k_htt *htt,1592enum ath10k_hw_txrx_mode txmode,1593struct sk_buff *msdu)1594{1595struct ath10k *ar = htt->ar;1596struct device *dev = ar->dev;1597struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);1598struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);1599struct ath10k_hif_sg_item sg_items[2];1600struct ath10k_htt_txbuf_64 *txbuf;1601struct htt_data_tx_desc_frag *frags;1602bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);1603u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);1604u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);1605int prefetch_len;1606int res;1607u8 flags0 = 0;1608u16 msdu_id, flags1 = 0;1609u16 freq = 0;1610dma_addr_t frags_paddr = 0;1611dma_addr_t txbuf_paddr;1612struct htt_msdu_ext_desc_64 *ext_desc = NULL;1613struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;16141615res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);1616if (res < 0)1617goto err;16181619msdu_id = 
res;16201621prefetch_len = min(htt->prefetch_len, msdu->len);1622prefetch_len = roundup(prefetch_len, 4);16231624txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;1625txbuf_paddr = htt->txbuf.paddr +1626(sizeof(struct ath10k_htt_txbuf_64) * msdu_id);16271628if (!is_eth) {1629struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;16301631if ((ieee80211_is_action(hdr->frame_control) ||1632ieee80211_is_deauth(hdr->frame_control) ||1633ieee80211_is_disassoc(hdr->frame_control)) &&1634ieee80211_has_protected(hdr->frame_control)) {1635skb_put(msdu, IEEE80211_CCMP_MIC_LEN);1636} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&1637txmode == ATH10K_HW_TXRX_RAW &&1638ieee80211_has_protected(hdr->frame_control)) {1639skb_put(msdu, IEEE80211_CCMP_MIC_LEN);1640}1641}16421643skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,1644DMA_TO_DEVICE);1645res = dma_mapping_error(dev, skb_cb->paddr);1646if (res) {1647res = -EIO;1648goto err_free_msdu_id;1649}16501651if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))1652freq = ar->scan.roc_freq;16531654switch (txmode) {1655case ATH10K_HW_TXRX_RAW:1656case ATH10K_HW_TXRX_NATIVE_WIFI:1657flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;1658fallthrough;1659case ATH10K_HW_TXRX_ETHERNET:1660if (ar->hw_params.continuous_frag_desc) {1661ext_desc_t = htt->frag_desc.vaddr_desc_64;1662memset(&ext_desc_t[msdu_id], 0,1663sizeof(struct htt_msdu_ext_desc_64));1664frags = (struct htt_data_tx_desc_frag *)1665&ext_desc_t[msdu_id].frags;1666ext_desc = &ext_desc_t[msdu_id];1667frags[0].tword_addr.paddr_lo =1668__cpu_to_le32(skb_cb->paddr);1669frags[0].tword_addr.paddr_hi =1670__cpu_to_le16(upper_32_bits(skb_cb->paddr));1671frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);16721673frags_paddr = htt->frag_desc.paddr +1674(sizeof(struct htt_msdu_ext_desc_64) * msdu_id);1675} else {1676frags = txbuf->frags;1677frags[0].tword_addr.paddr_lo =1678__cpu_to_le32(skb_cb->paddr);1679frags[0].tword_addr.paddr_hi 
=1680__cpu_to_le16(upper_32_bits(skb_cb->paddr));1681frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);1682frags[1].tword_addr.paddr_lo = 0;1683frags[1].tword_addr.paddr_hi = 0;1684frags[1].tword_addr.len_16 = 0;1685}1686flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);1687break;1688case ATH10K_HW_TXRX_MGMT:1689flags0 |= SM(ATH10K_HW_TXRX_MGMT,1690HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);1691flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;16921693frags_paddr = skb_cb->paddr;1694break;1695}16961697/* Normally all commands go through HTC which manages tx credits for1698* each endpoint and notifies when tx is completed.1699*1700* HTT endpoint is creditless so there's no need to care about HTC1701* flags. In that case it is trivial to fill the HTC header here.1702*1703* MSDU transmission is considered completed upon HTT event. This1704* implies no relevant resources can be freed until after the event is1705* received. That's why HTC tx completion handler itself is ignored by1706* setting NULL to transfer_context for all sg items.1707*1708* There is simply no point in pushing HTT TX_FRM through HTC tx path1709* as it's a waste of resources. 
By bypassing HTC it is possible to1710* avoid extra memory allocations, compress data structures and thus1711* improve performance.1712*/17131714txbuf->htc_hdr.eid = htt->eid;1715txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +1716sizeof(txbuf->cmd_tx) +1717prefetch_len);1718txbuf->htc_hdr.flags = 0;17191720if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)1721flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;17221723flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);1724flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);1725if (msdu->ip_summed == CHECKSUM_PARTIAL &&1726!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {1727flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;1728flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;1729if (ar->hw_params.continuous_frag_desc) {1730memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));1731ext_desc->tso_flag[3] |=1732__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);1733}1734}17351736/* Prevent firmware from sending up tx inspection requests. 
There's1737* nothing ath10k can do with frames requested for inspection so force1738* it to simply rely a regular tx completion with discard status.1739*/1740flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;17411742txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;1743txbuf->cmd_tx.flags0 = flags0;1744txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);1745txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);1746txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);17471748/* fill fragment descriptor */1749txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);1750if (ath10k_mac_tx_frm_has_freq(ar)) {1751txbuf->cmd_tx.offchan_tx.peerid =1752__cpu_to_le16(HTT_INVALID_PEERID);1753txbuf->cmd_tx.offchan_tx.freq =1754__cpu_to_le16(freq);1755} else {1756txbuf->cmd_tx.peerid =1757__cpu_to_le32(HTT_INVALID_PEERID);1758}17591760trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);1761ath10k_dbg(ar, ATH10K_DBG_HTT,1762"htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",1763flags0, flags1, msdu->len, msdu_id, &frags_paddr,1764&skb_cb->paddr, vdev_id, tid, freq);1765ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",1766msdu->data, msdu->len);1767trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);1768trace_ath10k_tx_payload(ar, msdu->data, msdu->len);17691770sg_items[0].transfer_id = 0;1771sg_items[0].transfer_context = NULL;1772sg_items[0].vaddr = &txbuf->htc_hdr;1773sg_items[0].paddr = txbuf_paddr +1774sizeof(txbuf->frags);1775sg_items[0].len = sizeof(txbuf->htc_hdr) +1776sizeof(txbuf->cmd_hdr) +1777sizeof(txbuf->cmd_tx);17781779sg_items[1].transfer_id = 0;1780sg_items[1].transfer_context = NULL;1781sg_items[1].vaddr = msdu->data;1782sg_items[1].paddr = skb_cb->paddr;1783sg_items[1].len = prefetch_len;17841785res = ath10k_hif_tx_sg(htt->ar,1786htt->ar->htc.endpoint[htt->eid].ul_pipe_id,1787sg_items, ARRAY_SIZE(sg_items));1788if (res)1789goto err_unmap_msdu;17901791return 0;17921793err_unmap_msdu:1794dma_unmap_single(dev, skb_cb->paddr, 
msdu->len, DMA_TO_DEVICE);1795err_free_msdu_id:1796spin_lock_bh(&htt->tx_lock);1797ath10k_htt_tx_free_msdu_id(htt, msdu_id);1798spin_unlock_bh(&htt->tx_lock);1799err:1800return res;1801}18021803static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {1804.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,1805.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,1806.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,1807.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,1808.htt_tx = ath10k_htt_tx_32,1809.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,1810.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,1811.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,1812};18131814static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {1815.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,1816.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,1817.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,1818.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,1819.htt_tx = ath10k_htt_tx_64,1820.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,1821.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,1822.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,1823};18241825static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {1826.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,1827.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,1828.htt_tx = ath10k_htt_tx_hl,1829.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,1830.htt_flush_tx = ath10k_htt_flush_tx_queue,1831};18321833void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)1834{1835struct ath10k *ar = htt->ar;18361837if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)1838htt->tx_ops = &htt_tx_ops_hl;1839else if (ar->hw_params.target_64bit)1840htt->tx_ops = &htt_tx_ops_64;1841else1842htt->tx_ops = &htt_tx_ops_32;1843}184418451846