Path: blob/main/sys/contrib/dev/iwlwifi/pcie/gen1_2/tx.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tcp.h>
#ifdef CONFIG_INET
#include <net/ip6_checksum.h>
#include <net/tso.h>
#endif
#if defined(__FreeBSD__)
#include <net/mac80211.h>
#endif

#include "fw/api/commands.h"
#include "fw/api/datapath.h"
#include "fw/api/debug.h"
#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "fw/dbg.h"
#include "pcie/utils.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space become < low mark, Tx queue stopped. When
 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
 * Tx queue resumed.
 *
 ***************************************************/
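/*
 * Illustrative sketch, not driver code: with read and write indexes that
 * wrap modulo a power-of-two queue size N, the number of occupied entries
 * can be computed without special-casing the wrap-around:
 *
 *	used = (write_ptr - read_ptr) & (N - 1);
 *
 * e.g. N = 256, read_ptr = 250, write_ptr = 10 gives used = 16.  Keeping a
 * couple of entries unused (see above) ensures a completely full queue can
 * never be confused with an empty one (used == 0).  The driver's actual
 * space accounting is done by helpers such as iwl_txq_space(), used further
 * down in this file.
 */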
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->mac_cfg->base->shadow_reg_enable &&
	    txq_id != trans->conf.cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
		struct iwl_txq *txq = trans_pcie->txqs.txq[i];

		if (!test_bit(i, trans_pcie->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
					    u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *tfd;
	u32 num_tbs;

	tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans_pcie->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);

	return num_tbs;
}
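/*
 * Illustrative sketch, not driver code: a TFD is filled by calling
 * iwl_pcie_txq_build_tfd() once per buffer chunk.  The first call passes
 * reset = true to zero the descriptor, later calls append further TBs:
 *
 *	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true);
 *	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 *
 * This mirrors how iwl_pcie_enqueue_hcmd() and iwl_trans_pcie_tx() below
 * build their descriptors; tb0_phys/tb1_phys/tb1_len are placeholders here
 * and error handling is omitted.
 */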
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->mac_cfg->base->apmg_wake_up_wa)
		return;

	spin_lock(&trans_pcie->reg_lock);

	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
		spin_unlock(&trans_pcie->reg_lock);
		return;
	}

	trans_pcie->cmd_hold_nic_awake = false;
	iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	spin_unlock(&trans_pcie->reg_lock);
}

static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
					     struct page *page)
{
	struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));

	/* Decrease internal use count and unmap/free page if needed */
	if (refcount_dec_and_test(&info->use_count)) {
		dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
			       DMA_TO_DEVICE);

		__free_page(page);
	}
}

void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_cmd_meta *cmd_meta)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct iwl_tso_page_info *info;
		struct page *tmp = next;

		info = IWL_TSO_PAGE_INFO(page_address(next));
		next = info->next;

		/* Unmap the scatter gather list that is on the last page */
		if (!next && cmd_meta->sg_offset) {
			struct sg_table *sgt;

			sgt = (void *)((u8 *)page_address(tmp) +
				       cmd_meta->sg_offset);

			dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
		}

		iwl_pcie_free_and_unmap_tso_page(trans, tmp);
	}
}

static inline dma_addr_t
iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr;
	dma_addr_t hi_len;

	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}
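/*
 * Worked example, illustrative only: the gen1 TB layout packs a 36-bit DMA
 * address and a length into tb->lo (32 bits) plus tb->hi_n_len (16 bits),
 * as written by iwl_pcie_gen1_tfd_set_tb() above and read back by
 * iwl_txq_gen1_tfd_tb_get_addr().  Assuming iwl_get_dma_hi_addr() returns
 * the address bits above 32, then for addr = 0x234567000 and len = 0x1f0:
 *
 *	tb->lo       = 0x34567000                  (low 32 address bits)
 *	tb->hi_n_len = (0x1f0 << 4) | 0x2 = 0x1f02 (length | high addr bits)
 *
 * and the unpacking above restores the address:
 *	0x34567000 | ((0x2 << 16) << 16) = 0x234567000
 */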
static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
					 struct iwl_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	tfd->num_tbs = 0;

	iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
				 trans_pcie->invalid_tx_cmd.size);
}

static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
				   struct iwl_cmd_meta *meta,
				   struct iwl_txq *txq, int index)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;
	struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);

	if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* TB1 is mapped directly, the rest is the TSO page and SG list. */
	if (meta->sg_offset)
		num_tbs = 2;

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	iwl_txq_set_tfd_invalid_gen1(trans, tfd);
}

/**
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans: transport private data
 * @txq: tx queue
 * @read_ptr: the TXQ read_ptr to free
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
			     int read_ptr)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->reclaim_lock);

	if (!txq->entries)
		return;

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	if (trans->mac_cfg->gen2)
		iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				       iwl_txq_get_tfd(trans, txq, read_ptr));
	else
		iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
				       txq, read_ptr);

	/* free SKB */
	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->reclaim_lock);
	spin_lock(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->conf.cmd_queue) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
			struct iwl_cmd_meta *cmd_meta =
				&txq->entries[txq->read_ptr].meta;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
		}
		iwl_txq_free_tfd(trans, txq, txq->read_ptr);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr &&
		    txq_id == trans->conf.cmd_queue)
			iwl_pcie_clear_cmd_in_flight(trans);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock(&txq->lock);
	spin_unlock_bh(&txq->reclaim_lock);

	/* just in case - this queue may have been stopped */
	iwl_trans_pcie_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->conf.cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->txqs.tfd.size *
				  trans->mac_cfg->base->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	timer_delete_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->mac_cfg->base->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->txqs.queue_stopped, 0,
	       sizeof(trans_pcie->txqs.queue_stopped));
	memset(trans_pcie->txqs.queue_used, 0,
	       sizeof(trans_pcie->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->mac_cfg->base->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
				trans->conf.cmd_fifo,
				IWL_DEF_WD_TIMEOUT);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode return early to avoid
	 * having invalid accesses
	 */
	if (WARN_ON_ONCE(trans->mac_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
		if (trans->mac_cfg->gen2)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have be reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bits(trans, FH_TSSR_TX_STATUS_REG, mask, 5000);
	if (ret)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->txqs.queue_stopped, 0,
	       sizeof(trans_pcie->txqs.queue_stopped));
	memset(trans_pcie->txqs.queue_used, 0,
	       sizeof(trans_pcie->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans_pcie->txqs.queue_used, 0,
	       sizeof(trans_pcie->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->mac_cfg->base->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans_pcie->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
}

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->mac_cfg->gen2) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->mac_cfg->base->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->mac_cfg->base->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = timer_container_of(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t num_entries = trans->mac_cfg->gen2 ?
		slots_num : trans->mac_cfg->base->max_tfd_queue_size;
	size_t tfd_sz;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
		return -EINVAL;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	tfd_sz = trans_pcie->txqs.tfd.size * num_entries;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device
	 */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	for (i = 0; i < num_entries; i++) {
		void *tfd = iwl_txq_get_tfd(trans, txq, i);

		if (trans->mac_cfg->gen2)
			iwl_txq_set_tfd_invalid_gen2(trans, tfd);
		else
			iwl_txq_set_tfd_invalid_gen1(trans, tfd);
	}

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
	txq->tfds = NULL;
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;

	if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= BC_TABLE_SIZE;

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->mac_cfg->base->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->conf.cmd_queue);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->mac_cfg->base->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->mac_cfg->base->min_ba_txq_size);
		trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
					 slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans_pcie->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken.
	 */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
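/*
 * Worked example, illustrative only: for a data queue with n_window = 256,
 * iwl_queue_init() above yields low_mark = 64 and high_mark = 32; for a
 * small queue with n_window = 8 both clamps apply, giving low_mark = 4 and
 * high_mark = 2.  Per the "Theory of operation" comment at the top of this
 * file, these marks decide when a Tx queue is stopped and resumed as free
 * space shrinks and grows.  The power-of-two requirement presumably lets
 * iwl_txq_get_cmd_index() map a TFD index into the n_window entry array
 * with a simple mask, e.g. index & (n_window - 1).
 */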
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		 int slots_num, bool cmd_queue)
{
	u32 tfd_queue_max_size =
		trans->mac_cfg->base->max_tfd_queue_size;
	int ret;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
	 */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);
	spin_lock_init(&txq->reclaim_lock);

	if (cmd_queue) {
#if defined(__linux__)
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
#endif
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->conf.cmd_queue);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->mac_cfg->base->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->mac_cfg->base->min_ba_txq_size);
		ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->mac_cfg->base->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!trans->mac_cfg->base->apmg_wake_up_wa)
		return 0;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set (see above.)
	 */
	if (!_iwl_trans_pcie_grab_nic_access(trans, false))
		return -EIO;

	/*
	 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
	 * There, we also returned immediately if cmd_hold_nic_awake is
	 * already true, so it's OK to unconditionally set it to true.
	 */
	trans_pcie->cmd_hold_nic_awake = true;
	spin_unlock(&trans_pcie->reg_lock);

	return 0;
}

static void iwl_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		timer_delete(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
				int read_ptr, int write_ptr)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, read_ptr);
	int w = iwl_txq_get_cmd_index(q, write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.
If there is1086* enough free space (> low mark), wake the stack that feeds us.1087*/1088static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)1089{1090struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1091struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];1092int nfreed = 0;1093u16 r;10941095lockdep_assert_held(&txq->lock);10961097idx = iwl_txq_get_cmd_index(txq, idx);1098r = iwl_txq_get_cmd_index(txq, txq->read_ptr);10991100if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||1101(!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {1102WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),1103"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",1104__func__, txq_id, idx,1105trans->mac_cfg->base->max_tfd_queue_size,1106txq->write_ptr, txq->read_ptr);1107return;1108}11091110for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;1111r = iwl_txq_inc_wrap(trans, r)) {1112txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);11131114if (nfreed++ > 0) {1115IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",1116idx, txq->write_ptr, r);1117iwl_force_nmi(trans);1118}1119}11201121if (txq->read_ptr == txq->write_ptr)1122iwl_pcie_clear_cmd_in_flight(trans);11231124iwl_txq_progress(txq);1125}11261127static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,1128u16 txq_id)1129{1130struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1131u32 tbl_dw_addr;1132u32 tbl_dw;1133u16 scd_q2ratid;11341135scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;11361137tbl_dw_addr = trans_pcie->scd_base_addr +1138SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);11391140tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);11411142if (txq_id & 0x1)1143tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);1144else1145tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);11461147iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);11481149return 0;1150}11511152/* Receiver address (actually, Rx station's index into station table),1153* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */1154#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))11551156bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,1157const struct iwl_trans_txq_scd_cfg *cfg,1158unsigned int wdg_timeout)1159{1160struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1161struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];1162int fifo = -1;1163bool scd_bug = false;11641165if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))1166WARN_ONCE(1, "queue %d already used - expect issues", txq_id);11671168txq->wd_timeout = msecs_to_jiffies(wdg_timeout);11691170if (cfg) {1171fifo = cfg->fifo;11721173/* Disable the scheduler prior configuring the cmd queue */1174if (txq_id == trans->conf.cmd_queue &&1175trans->conf.scd_set_active)1176iwl_scd_enable_set_active(trans, 0);11771178/* Stop this Tx queue before configuring it */1179iwl_scd_txq_set_inactive(trans, txq_id);11801181/* Set this queue as a chain-building queue unless it is CMD */1182if (txq_id != trans->conf.cmd_queue)1183iwl_scd_txq_set_chain(trans, txq_id);11841185if (cfg->aggregate) {1186u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);11871188/* Map receiver-address / traffic-ID to this queue */1189iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);11901191/* enable aggregations for the queue */1192iwl_scd_txq_enable_agg(trans, txq_id);1193txq->ampdu = true;1194} else {1195/*1196* disable aggregations for the queue, this will also1197* make the ra_tid mapping 
configuration irrelevant1198* since it is now a non-AGG queue.1199*/1200iwl_scd_txq_disable_agg(trans, txq_id);12011202ssn = txq->read_ptr;1203}1204} else {1205/*1206* If we need to move the SCD write pointer by steps of1207* 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let1208* the op_mode know by returning true later.1209* Do this only in case cfg is NULL since this trick can1210* be done only if we have DQA enabled which is true for mvm1211* only. And mvm never sets a cfg pointer.1212* This is really ugly, but this is the easiest way out for1213* this sad hardware issue.1214* This bug has been fixed on devices 9000 and up.1215*/1216scd_bug = !trans->mac_cfg->mq_rx_supported &&1217!((ssn - txq->write_ptr) & 0x3f) &&1218(ssn != txq->write_ptr);1219if (scd_bug)1220ssn++;1221}12221223/* Place first TFD at index corresponding to start sequence number.1224* Assumes that ssn_idx is valid (!= 0xFFF) */1225txq->read_ptr = (ssn & 0xff);1226txq->write_ptr = (ssn & 0xff);1227iwl_write_direct32(trans, HBUS_TARG_WRPTR,1228(ssn & 0xff) | (txq_id << 8));12291230if (cfg) {1231u8 frame_limit = cfg->frame_limit;12321233iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);12341235/* Set up Tx window size and frame limit for this queue */1236iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +1237SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);1238iwl_trans_write_mem32(trans,1239trans_pcie->scd_base_addr +1240SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),1241SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |1242SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));12431244/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */1245iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),1246(1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |1247(cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |1248(1 << SCD_QUEUE_STTS_REG_POS_WSL) |1249SCD_QUEUE_STTS_REG_MSK);12501251/* enable the scheduler for this queue (only) */1252if (txq_id == trans->conf.cmd_queue &&1253trans->conf.scd_set_active)1254iwl_scd_enable_set_active(trans, BIT(txq_id));12551256IWL_DEBUG_TX_QUEUES(trans,1257"Activate queue %d on FIFO %d WrPtr: %d\n",1258txq_id, fifo, ssn & 0xff);1259} else {1260IWL_DEBUG_TX_QUEUES(trans,1261"Activate queue %d WrPtr: %d\n",1262txq_id, ssn & 0xff);1263}12641265return scd_bug;1266}12671268void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,1269bool shared_mode)1270{1271struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1272struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];12731274txq->ampdu = !shared_mode;1275}12761277void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,1278bool configure_scd)1279{1280struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1281u32 stts_addr = trans_pcie->scd_base_addr +1282SCD_TX_STTS_QUEUE_OFFSET(txq_id);1283static const u32 zero_val[4] = {};12841285trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;1286trans_pcie->txqs.txq[txq_id]->frozen = false;12871288/*1289* Upon HW Rfkill - we stop the device, and then stop the queues1290* in the op_mode. 
Just for the sake of the simplicity of the op_mode,1291* allow the op_mode to call txq_disable after it already called1292* stop_device.1293*/1294if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {1295WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),1296"queue %d not used", txq_id);1297return;1298}12991300if (configure_scd) {1301iwl_scd_txq_set_inactive(trans, txq_id);13021303iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,1304ARRAY_SIZE(zero_val));1305}13061307iwl_pcie_txq_unmap(trans, txq_id);1308trans_pcie->txqs.txq[txq_id]->ampdu = false;13091310IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);1311}13121313/*************** HOST COMMAND QUEUE FUNCTIONS *****/13141315static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)1316{1317struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1318int i;13191320for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {1321struct iwl_txq *txq = trans_pcie->txqs.txq[i];13221323if (i == trans->conf.cmd_queue)1324continue;13251326/* we skip the command queue (obviously) so it's OK to nest */1327spin_lock_nested(&txq->lock, 1);13281329if (!block && !(WARN_ON_ONCE(!txq->block))) {1330txq->block--;1331if (!txq->block) {1332iwl_write32(trans, HBUS_TARG_WRPTR,1333txq->write_ptr | (i << 8));1334}1335} else if (block) {1336txq->block++;1337}13381339spin_unlock(&txq->lock);1340}1341}13421343/*1344* iwl_pcie_enqueue_hcmd - enqueue a uCode command1345* @priv: device private data point1346* @cmd: a pointer to the ucode command structure1347*1348* The function returns < 0 values to indicate the operation1349* failed. On success, it returns the index (>= 0) of command in the1350* command queue.1351*/1352int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,1353struct iwl_host_cmd *cmd)1354{1355struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1356struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];1357struct iwl_device_cmd *out_cmd;1358struct iwl_cmd_meta *out_meta;1359void *dup_buf = NULL;1360dma_addr_t phys_addr;1361int idx;1362u16 copy_size, cmd_size, tb0_size;1363bool had_nocopy = false;1364u8 group_id = iwl_cmd_groupid(cmd->id);1365int i, ret;1366u32 cmd_pos;1367const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];1368u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];1369unsigned long flags;13701371if (WARN(!trans->conf.wide_cmd_header &&1372group_id > IWL_ALWAYS_LONG_GROUP,1373"unsupported wide command %#x\n", cmd->id))1374return -EINVAL;13751376if (group_id != 0) {1377copy_size = sizeof(struct iwl_cmd_header_wide);1378cmd_size = sizeof(struct iwl_cmd_header_wide);1379} else {1380copy_size = sizeof(struct iwl_cmd_header);1381cmd_size = sizeof(struct iwl_cmd_header);1382}13831384/* need one for the header if the first is NOCOPY */1385BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);13861387for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {1388cmddata[i] = cmd->data[i];1389cmdlen[i] = cmd->len[i];13901391if (!cmd->len[i])1392continue;13931394/* need at least IWL_FIRST_TB_SIZE copied */1395if (copy_size < IWL_FIRST_TB_SIZE) {1396int copy = IWL_FIRST_TB_SIZE - copy_size;13971398if (copy > cmdlen[i])1399copy = cmdlen[i];1400cmdlen[i] -= copy;1401cmddata[i] += copy;1402copy_size += copy;1403}14041405if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {1406had_nocopy = true;1407if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {1408idx = -EINVAL;1409goto free_dup_buf;1410}1411} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {1412/*1413* This is also a chunk that isn't copied1414* 
to the static buffer so set had_nocopy.1415*/1416had_nocopy = true;14171418/* only allowed once */1419if (WARN_ON(dup_buf)) {1420idx = -EINVAL;1421goto free_dup_buf;1422}14231424dup_buf = kmemdup(cmddata[i], cmdlen[i],1425GFP_ATOMIC);1426if (!dup_buf)1427return -ENOMEM;1428} else {1429/* NOCOPY must not be followed by normal! */1430if (WARN_ON(had_nocopy)) {1431idx = -EINVAL;1432goto free_dup_buf;1433}1434copy_size += cmdlen[i];1435}1436cmd_size += cmd->len[i];1437}14381439/*1440* If any of the command structures end up being larger than1441* the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically1442* allocated into separate TFDs, then we will need to1443* increase the size of the buffers.1444*/1445if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,1446"Command %s (%#x) is too large (%d bytes)\n",1447iwl_get_cmd_string(trans, cmd->id),1448cmd->id, copy_size)) {1449idx = -EINVAL;1450goto free_dup_buf;1451}14521453spin_lock_irqsave(&txq->lock, flags);14541455if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {1456spin_unlock_irqrestore(&txq->lock, flags);14571458IWL_ERR(trans, "No space in command queue\n");1459iwl_op_mode_nic_error(trans->op_mode,1460IWL_ERR_TYPE_CMD_QUEUE_FULL);1461iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);1462idx = -ENOSPC;1463goto free_dup_buf;1464}14651466idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);1467out_cmd = txq->entries[idx].cmd;1468out_meta = &txq->entries[idx].meta;14691470/* re-initialize, this also marks the SG list as unused */1471memset(out_meta, 0, sizeof(*out_meta));1472if (cmd->flags & CMD_WANT_SKB)1473out_meta->source = cmd;14741475/* set up the header */1476if (group_id != 0) {1477out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);1478out_cmd->hdr_wide.group_id = group_id;1479out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);1480out_cmd->hdr_wide.length =1481cpu_to_le16(cmd_size -1482sizeof(struct iwl_cmd_header_wide));1483out_cmd->hdr_wide.reserved = 0;1484out_cmd->hdr_wide.sequence =1485cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |1486INDEX_TO_SEQ(txq->write_ptr));14871488cmd_pos = sizeof(struct iwl_cmd_header_wide);1489copy_size = sizeof(struct iwl_cmd_header_wide);1490} else {1491out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);1492out_cmd->hdr.sequence =1493cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |1494INDEX_TO_SEQ(txq->write_ptr));1495out_cmd->hdr.group_id = 0;14961497cmd_pos = sizeof(struct iwl_cmd_header);1498copy_size = sizeof(struct iwl_cmd_header);1499}15001501/* and copy the data that needs to be copied */1502for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {1503int copy;15041505if (!cmd->len[i])1506continue;15071508/* copy everything if not nocopy/dup */1509if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |1510IWL_HCMD_DFL_DUP))) {1511copy = cmd->len[i];15121513memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);1514cmd_pos += copy;1515copy_size += copy;1516continue;1517}15181519/*1520* Otherwise we need at least IWL_FIRST_TB_SIZE copied1521* in total (for bi-directional DMA), but copy up to what1522* we can fit into the payload for debug dump purposes.1523*/1524copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);15251526memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);1527cmd_pos += copy;15281529/* However, treat copy_size the proper way, we need it below */1530if (copy_size < IWL_FIRST_TB_SIZE) {1531copy = IWL_FIRST_TB_SIZE - copy_size;15321533if (copy > cmd->len[i])1534copy = cmd->len[i];1535copy_size += copy;1536}1537}15381539IWL_DEBUG_HC(trans,1540"Sending command %s (%.2x.%.2x), seq: 0x%04X, 
%d bytes at %d[%d]:%d\n",1541iwl_get_cmd_string(trans, cmd->id),1542group_id, out_cmd->hdr.cmd,1543le16_to_cpu(out_cmd->hdr.sequence),1544cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);15451546/* start the TFD with the minimum copy bytes */1547tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);1548memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);1549iwl_pcie_txq_build_tfd(trans, txq,1550iwl_txq_get_first_tb_dma(txq, idx),1551tb0_size, true);15521553/* map first command fragment, if any remains */1554if (copy_size > tb0_size) {1555phys_addr = dma_map_single(trans->dev,1556((u8 *)&out_cmd->hdr) + tb0_size,1557copy_size - tb0_size,1558DMA_TO_DEVICE);1559if (dma_mapping_error(trans->dev, phys_addr)) {1560iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,1561txq->write_ptr);1562idx = -ENOMEM;1563goto out;1564}15651566iwl_pcie_txq_build_tfd(trans, txq, phys_addr,1567copy_size - tb0_size, false);1568}15691570/* map the remaining (adjusted) nocopy/dup fragments */1571for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {1572void *data = (void *)(uintptr_t)cmddata[i];15731574if (!cmdlen[i])1575continue;1576if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |1577IWL_HCMD_DFL_DUP)))1578continue;1579if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)1580data = dup_buf;1581phys_addr = dma_map_single(trans->dev, data,1582cmdlen[i], DMA_TO_DEVICE);1583if (dma_mapping_error(trans->dev, phys_addr)) {1584iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,1585txq->write_ptr);1586idx = -ENOMEM;1587goto out;1588}15891590iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);1591}15921593BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);1594out_meta->flags = cmd->flags;1595if (WARN_ON_ONCE(txq->entries[idx].free_buf))1596kfree_sensitive(txq->entries[idx].free_buf);1597txq->entries[idx].free_buf = dup_buf;15981599trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);16001601/* start timer if queue currently empty */1602if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)1603mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);16041605ret = iwl_pcie_set_cmd_in_flight(trans, cmd);1606if (ret < 0) {1607idx = ret;1608goto out;1609}16101611if (cmd->flags & CMD_BLOCK_TXQS)1612iwl_trans_pcie_block_txq_ptrs(trans, true);16131614/* Increment and update queue's write index */1615txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);1616iwl_pcie_txq_inc_wr_ptr(trans, txq);16171618out:1619spin_unlock_irqrestore(&txq->lock, flags);1620free_dup_buf:1621if (idx < 0)1622kfree(dup_buf);1623return idx;1624}16251626/*1627* iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them1628* @rxb: Rx buffer to reclaim1629*/1630void iwl_pcie_hcmd_complete(struct iwl_trans *trans,1631struct iwl_rx_cmd_buffer *rxb)1632{1633struct iwl_rx_packet *pkt = rxb_addr(rxb);1634u16 sequence = le16_to_cpu(pkt->hdr.sequence);1635u8 group_id;1636u32 cmd_id;1637int txq_id = SEQ_TO_QUEUE(sequence);1638int index = SEQ_TO_INDEX(sequence);1639int cmd_index;1640struct iwl_device_cmd *cmd;1641struct iwl_cmd_meta *meta;1642struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1643struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];16441645/* If a Tx command is being handled and it isn't in the actual1646* command queue then there a command routing bug has been introduced1647* in the queue management code. 
*/1648if (IWL_FW_CHECK(trans, txq_id != trans->conf.cmd_queue,1649"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d pkt=%*phN\n",1650txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,1651txq->write_ptr, 32, pkt))1652return;16531654spin_lock_bh(&txq->lock);16551656cmd_index = iwl_txq_get_cmd_index(txq, index);1657cmd = txq->entries[cmd_index].cmd;1658meta = &txq->entries[cmd_index].meta;1659group_id = cmd->hdr.group_id;1660cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);16611662if (trans->mac_cfg->gen2)1663iwl_txq_gen2_tfd_unmap(trans, meta,1664iwl_txq_get_tfd(trans, txq, index));1665else1666iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);16671668/* Input error checking is done when commands are added to queue. */1669if (meta->flags & CMD_WANT_SKB) {1670struct page *p = rxb_steal_page(rxb);16711672meta->source->resp_pkt = pkt;1673#if defined(__linux__)1674meta->source->_rx_page_addr = (unsigned long)page_address(p);1675#elif defined(__FreeBSD__)1676meta->source->_page = p;1677#endif1678meta->source->_rx_page_order = trans_pcie->rx_page_order;1679}16801681if (meta->flags & CMD_BLOCK_TXQS)1682iwl_trans_pcie_block_txq_ptrs(trans, false);16831684iwl_pcie_cmdq_reclaim(trans, txq_id, index);16851686if (!(meta->flags & CMD_ASYNC)) {1687if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {1688IWL_WARN(trans,1689"HCMD_ACTIVE already clear for command %s\n",1690iwl_get_cmd_string(trans, cmd_id));1691}1692clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);1693IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",1694iwl_get_cmd_string(trans, cmd_id));1695wake_up(&trans_pcie->wait_command_queue);1696}16971698meta->flags = 0;16991700spin_unlock_bh(&txq->lock);1701}17021703static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,1704struct iwl_txq *txq, u8 hdr_len,1705struct iwl_cmd_meta *out_meta)1706{1707u16 head_tb_len;1708int i;17091710/*1711* Set up TFD's third entry to point directly to remainder1712* of skb's head, if any1713*/1714head_tb_len = skb_headlen(skb) - hdr_len;17151716if (head_tb_len > 0) {1717dma_addr_t tb_phys = dma_map_single(trans->dev,1718skb->data + hdr_len,1719head_tb_len, DMA_TO_DEVICE);1720if (unlikely(dma_mapping_error(trans->dev, tb_phys)))1721return -EINVAL;1722trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,1723tb_phys, head_tb_len);1724iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);1725}17261727/* set up the remaining entries to point to the data */1728for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {1729const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];1730dma_addr_t tb_phys;1731int tb_idx;17321733if (!skb_frag_size(frag))1734continue;17351736tb_phys = skb_frag_dma_map(trans->dev, frag, 0,1737skb_frag_size(frag), DMA_TO_DEVICE);17381739if (unlikely(dma_mapping_error(trans->dev, tb_phys)))1740return -EINVAL;1741trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),1742tb_phys, skb_frag_size(frag));1743tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,1744skb_frag_size(frag), false);1745if (tb_idx < 0)1746return tb_idx;17471748out_meta->tbs |= BIT(tb_idx);1749}17501751return 0;1752}17531754#ifdef CONFIG_INET1755static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,1756size_t len, struct sk_buff *skb)1757{1758struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1759struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);1760struct iwl_tso_page_info *info;1761struct page **page_ptr;1762dma_addr_t phys;1763void *ret;17641765page_ptr = (void *)((u8 
*)skb->cb + trans->conf.cb_data_offs);17661767if (WARN_ON(*page_ptr))1768return NULL;17691770if (!p->page)1771goto alloc;17721773/*1774* Check if there's enough room on this page1775*1776* Note that we put a page chaining pointer *last* in the1777* page - we need it somewhere, and if it's there then we1778* avoid DMA mapping the last bits of the page which may1779* trigger the 32-bit boundary hardware bug.1780*1781* (see also get_workaround_page() in tx-gen2.c)1782*/1783if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {1784info = IWL_TSO_PAGE_INFO(page_address(p->page));1785goto out;1786}17871788/* We don't have enough room on this page, get a new one. */1789iwl_pcie_free_and_unmap_tso_page(trans, p->page);17901791alloc:1792p->page = alloc_page(GFP_ATOMIC);1793if (!p->page)1794return NULL;1795p->pos = page_address(p->page);17961797info = IWL_TSO_PAGE_INFO(page_address(p->page));17981799/* set the chaining pointer to NULL */1800info->next = NULL;18011802/* Create a DMA mapping for the page */1803phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,1804DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);1805if (unlikely(dma_mapping_error(trans->dev, phys))) {1806__free_page(p->page);1807p->page = NULL;18081809return NULL;1810}18111812/* Store physical address and set use count */1813info->dma_addr = phys;1814refcount_set(&info->use_count, 1);1815out:1816*page_ptr = p->page;1817/* Return an internal reference for the caller */1818refcount_inc(&info->use_count);1819ret = p->pos;1820p->pos += len;18211822return ret;1823}18241825/**1826* iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list1827* @sgt: scatter gather table1828* @offset: Offset into the mapped memory (i.e. SKB payload data)1829* @len: Length of the area1830*1831* Find the DMA address that corresponds to the SKB payload data at the1832* position given by @offset.1833*1834* Returns: Address for TB entry1835*/1836dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,1837unsigned int len)1838{1839struct scatterlist *sg;1840unsigned int sg_offset = 0;1841int i;18421843/*1844* Search the mapped DMA areas in the SG for the area that contains the1845* data at offset with the given length.1846*/1847for_each_sgtable_dma_sg(sgt, sg, i) {1848if (offset >= sg_offset &&1849offset + len <= sg_offset + sg_dma_len(sg))1850return sg_dma_address(sg) + offset - sg_offset;18511852sg_offset += sg_dma_len(sg);1853}18541855WARN_ON_ONCE(1);18561857return DMA_MAPPING_ERROR;1858}18591860/**1861* iwl_pcie_prep_tso - Prepare TSO page and SKB for sending1862* @trans: transport private data1863* @skb: the SKB to map1864* @cmd_meta: command meta to store the scatter list information for unmapping1865* @hdr: output argument for TSO headers1866* @hdr_room: requested length for TSO headers1867* @offset: offset into the data from which mapping should start1868*1869* Allocate space for a scatter gather list and TSO headers and map the SKB1870* using the scatter gather list. 
The SKB is unmapped again when the page is1871* free'ed again at the end of the operation.1872*1873* Returns: newly allocated and mapped scatter gather table with list1874*/1875struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,1876struct iwl_cmd_meta *cmd_meta,1877u8 **hdr, unsigned int hdr_room,1878unsigned int offset)1879{1880struct sg_table *sgt;1881unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;1882int orig_nents;18831884if (WARN_ON_ONCE(skb_has_frag_list(skb)))1885return NULL;18861887*hdr = iwl_pcie_get_page_hdr(trans,1888hdr_room + __alignof__(struct sg_table) +1889sizeof(struct sg_table) +1890n_segments * sizeof(struct scatterlist),1891skb);1892if (!*hdr)1893return NULL;18941895sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));1896sgt->sgl = (void *)(sgt + 1);18971898sg_init_table(sgt->sgl, n_segments);18991900/* Only map the data, not the header (it is copied to the TSO page) */1901orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);1902if (WARN_ON_ONCE(orig_nents <= 0))1903return NULL;19041905sgt->orig_nents = orig_nents;19061907/* And map the entire SKB */1908if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)1909return NULL;19101911/* Store non-zero (i.e. valid) offset for unmapping */1912cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;19131914return sgt;1915}19161917static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,1918struct iwl_txq *txq, u8 hdr_len,1919struct iwl_cmd_meta *out_meta,1920struct iwl_device_tx_cmd *dev_cmd,1921u16 tb1_len)1922{1923struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1924struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;1925struct ieee80211_hdr *hdr = (void *)skb->data;1926unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;1927unsigned int mss = skb_shinfo(skb)->gso_size;1928unsigned int data_offset = 0;1929u16 length, iv_len, amsdu_pad;1930dma_addr_t start_hdr_phys;1931u8 *start_hdr, *pos_hdr;1932struct sg_table *sgt;1933struct tso_t tso;19341935/* if the packet is protected, then it must be CCMP or GCMP */1936BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);1937iv_len = ieee80211_has_protected(hdr->frame_control) ?1938IEEE80211_CCMP_HDR_LEN : 0;19391940trace_iwlwifi_dev_tx(trans->dev, skb,1941iwl_txq_get_tfd(trans, txq, txq->write_ptr),1942trans_pcie->txqs.tfd.size,1943&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);19441945ip_hdrlen = skb_network_header_len(skb);1946snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);1947total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;1948amsdu_pad = 0;19491950/* total amount of header we may need for this A-MSDU */1951hdr_room = DIV_ROUND_UP(total_len, mss) *1952(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;19531954/* Our device supports 9 segments at most, it will fit in 1 page */1955sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,1956snap_ip_tcp_hdrlen + hdr_len + iv_len);1957if (!sgt)1958return -ENOMEM;19591960start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);1961pos_hdr = start_hdr;1962memcpy(pos_hdr, skb->data + hdr_len, iv_len);1963pos_hdr += iv_len;19641965/*1966* Pull the ieee80211 header + IV to be able to use TSO core,1967* we will restore it for the tx_status flow.1968*/1969skb_pull(skb, hdr_len + iv_len);19701971/*1972* Remove the length of all the headers that we don't actually1973* have in the MPDU by themselves, but that we duplicate into1974* all the different MSDUs inside the 
A-MSDU.1975*/1976le16_add_cpu(&tx_cmd->params.len, -snap_ip_tcp_hdrlen);19771978tso_start(skb, &tso);19791980while (total_len) {1981/* this is the data left for this subframe */1982unsigned int data_left =1983min_t(unsigned int, mss, total_len);1984unsigned int hdr_tb_len;1985dma_addr_t hdr_tb_phys;1986u8 *subf_hdrs_start = pos_hdr;19871988total_len -= data_left;19891990memset(pos_hdr, 0, amsdu_pad);1991pos_hdr += amsdu_pad;1992amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +1993data_left)) & 0x3;1994ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));1995pos_hdr += ETH_ALEN;1996ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));1997pos_hdr += ETH_ALEN;19981999length = snap_ip_tcp_hdrlen + data_left;2000*((__be16 *)pos_hdr) = cpu_to_be16(length);2001pos_hdr += sizeof(length);20022003/*2004* This will copy the SNAP as well which will be considered2005* as MAC header.2006*/2007tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);20082009pos_hdr += snap_ip_tcp_hdrlen;20102011hdr_tb_len = pos_hdr - start_hdr;2012hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);20132014iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,2015hdr_tb_len, false);2016trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,2017hdr_tb_phys, hdr_tb_len);2018/* add this subframe's headers' length to the tx_cmd */2019le16_add_cpu(&tx_cmd->params.len, pos_hdr - subf_hdrs_start);20202021/* prepare the start_hdr for the next subframe */2022start_hdr = pos_hdr;20232024/* put the payload */2025while (data_left) {2026unsigned int size = min_t(unsigned int, tso.size,2027data_left);2028dma_addr_t tb_phys;20292030tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);2031/* Not a real mapping error, use direct comparison */2032if (unlikely(tb_phys == DMA_MAPPING_ERROR))2033return -EINVAL;20342035iwl_pcie_txq_build_tfd(trans, txq, tb_phys,2036size, false);2037trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,2038tb_phys, size);20392040data_left -= size;2041data_offset += size;2042tso_build_data(skb, &tso, size);2043}2044}20452046dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,2047DMA_TO_DEVICE);20482049/* re -add the WiFi header and IV */2050skb_push(skb, hdr_len + iv_len);20512052return 0;2053}2054#else /* CONFIG_INET */2055static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,2056struct iwl_txq *txq, u8 hdr_len,2057struct iwl_cmd_meta *out_meta,2058struct iwl_device_tx_cmd *dev_cmd,2059u16 tb1_len)2060{2061/* No A-MSDU without CONFIG_INET */2062WARN_ON(1);20632064return -1;2065}2066#endif /* CONFIG_INET */20672068#define IWL_TX_CRC_SIZE 42069#define IWL_TX_DELIMITER_SIZE 420702071/*2072* iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array2073*/2074static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,2075struct iwl_txq *txq, u16 byte_cnt,2076int num_tbs)2077{2078struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2079struct iwl_bc_tbl_entry *scd_bc_tbl;2080int write_ptr = txq->write_ptr;2081int txq_id = txq->id;2082u8 sec_ctl = 0;2083u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;2084__le16 bc_ent;2085struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;2086struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;2087u8 sta_id = tx_cmd->params.sta_id;20882089scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;20902091sec_ctl = tx_cmd->params.sec_ctl;20922093switch (sec_ctl & TX_CMD_SEC_MSK) {2094case TX_CMD_SEC_CCM:2095len += IEEE80211_CCMP_MIC_LEN;2096break;2097case TX_CMD_SEC_TKIP:2098len += 
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_7000 &&
	    trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + write_ptr].tfd_offset = bc_ent;
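
	/*
	 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored right after
	 * the TFD_QUEUE_SIZE_MAX regular entries, so the scheduler can keep
	 * reading a contiguous window of byte counts across the queue
	 * wrap-around.
	 */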
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
			bc_ent;
}

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans_pcie->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->conf.cb_data_offs +
					       sizeof(void *));

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				  INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd_v6_params, scratch);

	tx_cmd->params.dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->params.dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	memset(out_meta, 0, sizeof(*out_meta));

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->params.tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}
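
	/*
	 * From here the TFD is populated: TB0 holds the first
	 * IWL_FIRST_TB_SIZE bytes of the command (copied into the
	 * bi-directional first_tb_bufs buffer below), TB1 maps the rest of
	 * the TX command plus the 802.11 header, and the remaining TBs map
	 * the payload (A-MSDU subframes or SKB fragments).
	 */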
	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_v6_params, scratch) >
		     IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans_pcie->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->params.len),
					 iwl_txq_gen1_tfd_get_num_tbs(tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}

static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    int read_ptr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
	int txq_id = txq->id;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->conf.cmd_queue)
		sta_id = tx_cmd->params.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + read_ptr].tfd_offset = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
			bc_ent;
}

/* Frees buffers until index _not_ inclusive */
void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		      struct sk_buff_head *skbs, bool is_flush)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
	int tfd_num, read_ptr, last_to_free;
	int txq_read_ptr, txq_write_ptr;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->conf.cmd_queue))
		return;

	if (WARN_ON(!txq))
		return;

	tfd_num = iwl_txq_get_cmd_index(txq, ssn);

	spin_lock_bh(&txq->reclaim_lock);

	spin_lock(&txq->lock);
	txq_read_ptr = txq->read_ptr;
	txq_write_ptr = txq->write_ptr;
	spin_unlock(&txq->lock);

	/* There is nothing to do if we are flushing an empty queue */
	if (is_flush && txq_write_ptr == txq_read_ptr)
		goto out;

	read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);

	if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
			   txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used
	 */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->mac_cfg->base->max_tfd_queue_size,
			txq_write_ptr, txq_read_ptr);

		iwl_op_mode_time_point(trans->op_mode,
				       IWL_FW_INI_TIME_POINT_FAKE_TX,
				       NULL);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
		struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
			      read_ptr, txq_read_ptr, txq_id))
			continue;

		iwl_pcie_free_tso_pages(trans, skb, cmd_meta);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->mac_cfg->gen2)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
							txq_read_ptr);

		iwl_txq_free_tfd(trans, txq, txq_read_ptr);
	}
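
	/*
	 * Publish the new read pointer under txq->lock and, if enough room
	 * was freed (above the low mark), wake a queue that was stopped
	 * earlier and retransmit whatever was parked on the overflow queue
	 * in the meantime.
	 */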
	spin_lock(&txq->lock);
	txq->read_ptr = txq_read_ptr;

	iwl_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;
		struct sk_buff *skb;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q,
				      is_flush ? skbs : &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in reclaim path and are holding
		 * reclaim_lock, so no one will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as well.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock(&txq->lock);

		while ((skb = __skb_dequeue(&overflow_skbs))) {
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->conf.cb_data_offs +
						 sizeof(void *));

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_txq_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_trans_pcie_wake_queue(trans, txq);

		spin_lock(&txq->lock);
		txq->overflow_tx = false;
	}

	spin_unlock(&txq->lock);
out:
	spin_unlock_bh(&txq->reclaim_lock);
}

/* Set wr_ptr of specific device and txq */
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
			       unsigned long txqs, bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			timer_delete(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
					 struct iwl_host_cmd *cmd,
					 const char *cmd_str)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (trans->mac_cfg->gen2)
		cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	else
		cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);

	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}
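
	/*
	 * Wait for the response: the completion path is expected to clear
	 * STATUS_SYNC_HCMD_ACTIVE and wake wait_command_queue once the
	 * command's response arrives (or we give up after the timeout below).
	 */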
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
					&trans->status)) {
			IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
			dump_stack();
		}
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd)
{
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);

		/* An asynchronous command cannot expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		if (trans->mac_cfg->gen2)
			ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		else
			ret = iwl_pcie_enqueue_hcmd(trans, cmd);

		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
}