Path: blob/main/sys/contrib/dev/iwlwifi/pcie/gen1_2/rx.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "pcie/iwl-context-info-v2.h"
#include "fw/dbg.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool in the size of num_queues*(8-2) - the
 *   maximum missing RBDs per allocation request (request posted with 2
 *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
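
/*
 * Illustrative example (editor's sketch, not part of the driver): with the
 * convention above and e.g. a 256-entry queue, READ owned by the firmware
 * and WRITE owned by the driver behave as follows:
 *
 *   empty:  WRITE == (READ - 1) & 255   -> no filled buffers to process
 *   full:   WRITE == READ               -> no free slot for the firmware
 *   space:  (READ - WRITE - 1) & 255    -> free slots, which is exactly what
 *                                          iwl_rxq_space() below returns
 */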

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
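
/*
 * Worked example (editor's note, not part of the driver): legacy hardware
 * stores an RBD pointer as the buffer's DMA address shifted right by 8 bits,
 * so receive buffers must be 256-byte aligned for the device to reconstruct
 * the address.  For instance, a (made up) DMA address of 0x12345600 is
 * written to the RBD as 0x00123456.
 */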

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->mac_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->mac_cfg->base->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (!trans->mac_cfg->mq_rx_supported)
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
	else
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
}
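
/*
 * Editor's note (illustrative, not part of the driver): the device write
 * pointer is only advanced in multiples of 8, so round_down(rxq->write, 8)
 * maps e.g. write = 13 to write_actual = 8; the remaining 5 freshly restocked
 * RBDs are only exposed to the hardware on a later update, once write has
 * crossed the next multiple of 8.
 */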

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->info.num_rxqs; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock_bh(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}
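
/*
 * Editor's note (illustrative, not part of the driver): two RBD layouts are
 * written by iwl_pcie_restock_bd() above.  AX210 and later use a 16-byte
 * iwl_rx_transfer_desc with separate addr and rbid fields.  Older multi-queue
 * hardware uses a single __le64 per RBD, with the virtual buffer ID OR'ed
 * into the low bits of the DMA address; the WARN_ON() on supported_dma_mask
 * checks that those low bits really are zero, i.e. that the buffer alignment
 * leaves room for the ID.
 */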

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->mac_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   u32 *offset, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
	unsigned int rbsize = trans_pcie->rx_buf_bytes;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	if (trans_pcie->alloc_page) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		/* recheck */
		if (trans_pcie->alloc_page) {
			*offset = trans_pcie->alloc_page_used;
			page = trans_pcie->alloc_page;
			trans_pcie->alloc_page_used += rbsize;
			if (trans_pcie->alloc_page_used >= allocsize)
				trans_pcie->alloc_page = NULL;
			else
				get_page(page);
			spin_unlock_bh(&trans_pcie->alloc_page_lock);
			return page;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}

	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}
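
/*
 * Worked example (editor's note, not part of the driver): when a receive
 * buffer is at most half the allocation unit (2 * rbsize <= allocsize), the
 * remainder of the page is cached in trans_pcie->alloc_page and handed out on
 * the next call.  With 2 KiB buffers and 4 KiB pages, two RBs share one page:
 * the first caller gets offset 0, the second gets offset 2048, and get_page()
 * keeps one reference per user so the page is only freed once both buffers
 * have been released.
 */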

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		spin_lock_bh(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			return;
		}
		spin_unlock_bh(&rxq->lock);

		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock_bh(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_bh(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_bh(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock_bh(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_bh(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (!trans_pcie->rx_pool)
		return;

	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}
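
/*
 * Editor's note (illustrative, not part of the driver): the allocator below
 * works in batches, matching the theory-of-operation comment at the top of
 * the file.  A queue posts an allocation request once it has accumulated
 * RX_POST_REQ_ALLOC (2) used RBDs, the worker then allocates
 * RX_CLAIM_REQ_ALLOC (8) pages per pending request, and the queue later
 * claims the whole batch in iwl_pcie_rx_allocator_get().  The initial pool of
 * num_queues * (8 - 2) spare RBDs covers the gap between posting a request
 * with 2 RBDs and claiming 8 back.
 */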

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock_bh(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock_bh(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
						      gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
						     trans_pcie->rx_buf_bytes,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock_bh(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock_bh(&rba->lock);

		atomic_inc(&rba->req_ready);

	}

	spin_lock_bh(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock_bh(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e.
- there are ready requests and the function628* hands one request to the caller.629*/630if (atomic_dec_if_positive(&rba->req_ready) < 0)631return;632633spin_lock(&rba->lock);634for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {635/* Get next free Rx buffer, remove it from free list */636struct iwl_rx_mem_buffer *rxb =637list_first_entry(&rba->rbd_allocated,638struct iwl_rx_mem_buffer, list);639640list_move(&rxb->list, &rxq->rx_free);641}642spin_unlock(&rba->lock);643644rxq->used_count -= RX_CLAIM_REQ_ALLOC;645rxq->free_count += RX_CLAIM_REQ_ALLOC;646}647648void iwl_pcie_rx_allocator_work(struct work_struct *data)649{650struct iwl_rb_allocator *rba_p =651container_of(data, struct iwl_rb_allocator, rx_alloc);652struct iwl_trans_pcie *trans_pcie =653container_of(rba_p, struct iwl_trans_pcie, rba);654655iwl_pcie_rx_allocator(trans_pcie->trans);656}657658static int iwl_pcie_free_bd_size(struct iwl_trans *trans)659{660if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)661return sizeof(struct iwl_rx_transfer_desc);662663return trans->mac_cfg->mq_rx_supported ?664sizeof(__le64) : sizeof(__le32);665}666667static int iwl_pcie_used_bd_size(struct iwl_trans *trans)668{669if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)670return sizeof(struct iwl_rx_completion_desc_bz);671672if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)673return sizeof(struct iwl_rx_completion_desc);674675return sizeof(__le32);676}677678static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,679struct iwl_rxq *rxq)680{681int free_size = iwl_pcie_free_bd_size(trans);682683if (rxq->bd)684dma_free_coherent(trans->dev,685free_size * rxq->queue_size,686rxq->bd, rxq->bd_dma);687rxq->bd_dma = 0;688rxq->bd = NULL;689690rxq->rb_stts_dma = 0;691rxq->rb_stts = NULL;692693if (rxq->used_bd)694dma_free_coherent(trans->dev,695iwl_pcie_used_bd_size(trans) *696rxq->queue_size,697rxq->used_bd, rxq->used_bd_dma);698rxq->used_bd_dma = 0;699rxq->used_bd = NULL;700}701702static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)703{704bool use_rx_td = (trans->mac_cfg->device_family >=705IWL_DEVICE_FAMILY_AX210);706707if (use_rx_td)708return sizeof(__le16);709710return sizeof(struct iwl_rb_status);711}712713static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,714struct iwl_rxq *rxq)715{716struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);717size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);718struct device *dev = trans->dev;719int i;720int free_size;721722spin_lock_init(&rxq->lock);723if (trans->mac_cfg->mq_rx_supported)724rxq->queue_size = iwl_trans_get_num_rbds(trans);725else726rxq->queue_size = RX_QUEUE_SIZE;727728free_size = iwl_pcie_free_bd_size(trans);729730/*731* Allocate the circular buffer of Read Buffer Descriptors732* (RBDs)733*/734rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,735&rxq->bd_dma, GFP_KERNEL);736if (!rxq->bd)737goto err;738739if (trans->mac_cfg->mq_rx_supported) {740rxq->used_bd = dma_alloc_coherent(dev,741iwl_pcie_used_bd_size(trans) *742rxq->queue_size,743&rxq->used_bd_dma,744GFP_KERNEL);745if (!rxq->used_bd)746goto err;747}748749rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;750rxq->rb_stts_dma =751trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;752753return 0;754755err:756for (i = 0; i < trans->info.num_rxqs; i++) {757struct iwl_rxq *rxq = &trans_pcie->rxq[i];758759iwl_pcie_free_rxq_dma(trans, rxq);760}761762return -ENOMEM;763}764765static int iwl_pcie_rx_alloc(struct iwl_trans *trans)766{767struct iwl_trans_pcie *trans_pcie = 
IWL_TRANS_GET_PCIE_TRANS(trans);768size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);769struct iwl_rb_allocator *rba = &trans_pcie->rba;770int i, ret;771772if (WARN_ON(trans_pcie->rxq))773return -EINVAL;774775trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),776GFP_KERNEL);777trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),778sizeof(trans_pcie->rx_pool[0]),779GFP_KERNEL);780trans_pcie->global_table =781kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),782sizeof(trans_pcie->global_table[0]),783GFP_KERNEL);784if (!trans_pcie->rxq || !trans_pcie->rx_pool ||785!trans_pcie->global_table) {786ret = -ENOMEM;787goto err;788}789790spin_lock_init(&rba->lock);791792/*793* Allocate the driver's pointer to receive buffer status.794* Allocate for all queues continuously (HW requirement).795*/796trans_pcie->base_rb_stts =797dma_alloc_coherent(trans->dev,798rb_stts_size * trans->info.num_rxqs,799&trans_pcie->base_rb_stts_dma,800GFP_KERNEL);801if (!trans_pcie->base_rb_stts) {802ret = -ENOMEM;803goto err;804}805806for (i = 0; i < trans->info.num_rxqs; i++) {807struct iwl_rxq *rxq = &trans_pcie->rxq[i];808809rxq->id = i;810ret = iwl_pcie_alloc_rxq_dma(trans, rxq);811if (ret)812goto err;813}814return 0;815816err:817if (trans_pcie->base_rb_stts) {818dma_free_coherent(trans->dev,819rb_stts_size * trans->info.num_rxqs,820trans_pcie->base_rb_stts,821trans_pcie->base_rb_stts_dma);822trans_pcie->base_rb_stts = NULL;823trans_pcie->base_rb_stts_dma = 0;824}825kfree(trans_pcie->rx_pool);826trans_pcie->rx_pool = NULL;827kfree(trans_pcie->global_table);828trans_pcie->global_table = NULL;829kfree(trans_pcie->rxq);830trans_pcie->rxq = NULL;831832return ret;833}834835static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)836{837u32 rb_size;838const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */839840switch (trans->conf.rx_buf_size) {841case IWL_AMSDU_4K:842rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;843break;844case IWL_AMSDU_8K:845rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;846break;847case IWL_AMSDU_12K:848rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;849break;850default:851WARN_ON(1);852rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;853}854855if (!iwl_trans_grab_nic_access(trans))856return;857858/* Stop Rx DMA */859iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);860/* reset and flush pointers */861iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);862iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);863iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);864865/* Reset driver's Rx queue write index */866iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);867868/* Tell device where to find RBD circular buffer in DRAM */869iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,870(u32)(rxq->bd_dma >> 8));871872/* Tell device where in DRAM to update its Rx status */873iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,874rxq->rb_stts_dma >> 4);875876/* Enable Rx DMA877* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in878* the credit mechanism in 5000 HW RX FIFO879* Direct rx interrupts to hosts880* Rx buffer size 4 or 8k or 12k881* RB timeout 0x10882* 256 RBDs883*/884iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,885FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |886FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |887FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |888rb_size |889(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |890(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));891892iwl_trans_release_nic_access(trans);893894/* Set interrupt coalescing timer to default 
(2048 usecs) */895iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);896897/* W/A for interrupt coalescing bug in 7260 and 3160 */898if (trans->cfg->host_interrupt_operation_mode)899iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);900}901902static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)903{904struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);905u32 rb_size, enabled = 0;906int i;907908switch (trans->conf.rx_buf_size) {909case IWL_AMSDU_2K:910rb_size = RFH_RXF_DMA_RB_SIZE_2K;911break;912case IWL_AMSDU_4K:913rb_size = RFH_RXF_DMA_RB_SIZE_4K;914break;915case IWL_AMSDU_8K:916rb_size = RFH_RXF_DMA_RB_SIZE_8K;917break;918case IWL_AMSDU_12K:919rb_size = RFH_RXF_DMA_RB_SIZE_12K;920break;921default:922WARN_ON(1);923rb_size = RFH_RXF_DMA_RB_SIZE_4K;924}925926if (!iwl_trans_grab_nic_access(trans))927return;928929/* Stop Rx DMA */930iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);931/* disable free amd used rx queue operation */932iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);933934for (i = 0; i < trans->info.num_rxqs; i++) {935/* Tell device where to find RBD free table in DRAM */936iwl_write_prph64_no_grab(trans,937RFH_Q_FRBDCB_BA_LSB(i),938trans_pcie->rxq[i].bd_dma);939/* Tell device where to find RBD used table in DRAM */940iwl_write_prph64_no_grab(trans,941RFH_Q_URBDCB_BA_LSB(i),942trans_pcie->rxq[i].used_bd_dma);943/* Tell device where in DRAM to update its Rx status */944iwl_write_prph64_no_grab(trans,945RFH_Q_URBD_STTS_WPTR_LSB(i),946trans_pcie->rxq[i].rb_stts_dma);947/* Reset device indice tables */948iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);949iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);950iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);951952enabled |= BIT(i) | BIT(i + 16);953}954955/*956* Enable Rx DMA957* Rx buffer size 4 or 8k or 12k958* Min RB size 4 or 8959* Drop frames that exceed RB size960* 512 RBDs961*/962iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,963RFH_DMA_EN_ENABLE_VAL | rb_size |964RFH_RXF_DMA_MIN_RB_4_8 |965RFH_RXF_DMA_DROP_TOO_LARGE_MASK |966RFH_RXF_DMA_RBDCB_SIZE_512);967968/*969* Activate DMA snooping.970* Set RX DMA chunk size to 64B for IOSF and 128B for PCIe971* Default queue is 0972*/973iwl_write_prph_no_grab(trans, RFH_GEN_CFG,974RFH_GEN_CFG_RFH_DMA_SNOOP |975RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |976RFH_GEN_CFG_SERVICE_DMA_SNOOP |977RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,978trans->mac_cfg->integrated ?979RFH_GEN_CFG_RB_CHUNK_SIZE_64 :980RFH_GEN_CFG_RB_CHUNK_SIZE_128));981/* Enable the relevant rx queues */982iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);983984iwl_trans_release_nic_access(trans);985986/* Set interrupt coalescing timer to default (2048 usecs) */987iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);988}989990void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)991{992lockdep_assert_held(&rxq->lock);993994INIT_LIST_HEAD(&rxq->rx_free);995INIT_LIST_HEAD(&rxq->rx_used);996rxq->free_count = 0;997rxq->used_count = 0;998}9991000static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);10011002static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)1003{1004return *(struct iwl_trans_pcie **)netdev_priv(dev);1005}10061007static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)1008{1009struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);1010struct iwl_trans_pcie *trans_pcie;1011struct iwl_trans *trans;1012int ret;10131014trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);1015trans = 
trans_pcie->trans;10161017ret = iwl_pcie_rx_handle(trans, rxq->id, budget);10181019IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",1020rxq->id, ret, budget);10211022if (ret < budget) {1023spin_lock(&trans_pcie->irq_lock);1024if (test_bit(STATUS_INT_ENABLED, &trans->status))1025_iwl_enable_interrupts(trans);1026spin_unlock(&trans_pcie->irq_lock);10271028napi_complete_done(&rxq->napi, ret);1029}10301031return ret;1032}10331034static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)1035{1036struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);1037struct iwl_trans_pcie *trans_pcie;1038struct iwl_trans *trans;1039int ret;10401041trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);1042trans = trans_pcie->trans;10431044ret = iwl_pcie_rx_handle(trans, rxq->id, budget);1045IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,1046budget);10471048if (ret < budget) {1049int irq_line = rxq->id;10501051/* FIRST_RSS is shared with line 0 */1052if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&1053rxq->id == 1)1054irq_line = 0;10551056spin_lock(&trans_pcie->irq_lock);1057iwl_pcie_clear_irq(trans, irq_line);1058spin_unlock(&trans_pcie->irq_lock);10591060napi_complete_done(&rxq->napi, ret);1061}10621063return ret;1064}10651066void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)1067{1068struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1069int i;10701071if (unlikely(!trans_pcie->rxq))1072return;10731074for (i = 0; i < trans->info.num_rxqs; i++) {1075struct iwl_rxq *rxq = &trans_pcie->rxq[i];10761077if (rxq && rxq->napi.poll)1078napi_synchronize(&rxq->napi);1079}1080}10811082static int _iwl_pcie_rx_init(struct iwl_trans *trans)1083{1084struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1085struct iwl_rxq *def_rxq;1086struct iwl_rb_allocator *rba = &trans_pcie->rba;1087int i, err, queue_size, allocator_pool_size, num_alloc;10881089if (!trans_pcie->rxq) {1090err = iwl_pcie_rx_alloc(trans);1091if (err)1092return err;1093}1094def_rxq = trans_pcie->rxq;10951096cancel_work_sync(&rba->rx_alloc);10971098spin_lock_bh(&rba->lock);1099atomic_set(&rba->req_pending, 0);1100atomic_set(&rba->req_ready, 0);1101INIT_LIST_HEAD(&rba->rbd_allocated);1102INIT_LIST_HEAD(&rba->rbd_empty);1103spin_unlock_bh(&rba->lock);11041105/* free all first - we overwrite everything here */1106iwl_pcie_free_rbs_pool(trans);11071108for (i = 0; i < RX_QUEUE_SIZE; i++)1109def_rxq->queue[i] = NULL;11101111for (i = 0; i < trans->info.num_rxqs; i++) {1112struct iwl_rxq *rxq = &trans_pcie->rxq[i];11131114spin_lock_bh(&rxq->lock);1115/*1116* Set read write pointer to reflect that we have processed1117* and used all buffers, but have not restocked the Rx queue1118* with fresh buffers1119*/1120rxq->read = 0;1121rxq->write = 0;1122rxq->write_actual = 0;1123memset(rxq->rb_stts, 0,1124(trans->mac_cfg->device_family >=1125IWL_DEVICE_FAMILY_AX210) ?1126sizeof(__le16) : sizeof(struct iwl_rb_status));11271128iwl_pcie_rx_init_rxb_lists(rxq);11291130spin_unlock_bh(&rxq->lock);11311132if (!rxq->napi.poll) {1133int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;11341135if (trans_pcie->msix_enabled)1136poll = iwl_pcie_napi_poll_msix;11371138netif_napi_add(trans_pcie->napi_dev, &rxq->napi,1139poll);1140napi_enable(&rxq->napi);1141}11421143}11441145/* move the pool to the default queue and allocator ownerships */1146queue_size = trans->mac_cfg->mq_rx_supported ?1147trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;1148allocator_pool_size = trans->info.num_rxqs 
*1149(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);1150num_alloc = queue_size + allocator_pool_size;11511152for (i = 0; i < num_alloc; i++) {1153struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];11541155if (i < allocator_pool_size)1156list_add(&rxb->list, &rba->rbd_empty);1157else1158list_add(&rxb->list, &def_rxq->rx_used);1159trans_pcie->global_table[i] = rxb;1160rxb->vid = (u16)(i + 1);1161rxb->invalid = true;1162}11631164iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);11651166return 0;1167}11681169int iwl_pcie_rx_init(struct iwl_trans *trans)1170{1171struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1172int ret = _iwl_pcie_rx_init(trans);11731174if (ret)1175return ret;11761177if (trans->mac_cfg->mq_rx_supported)1178iwl_pcie_rx_mq_hw_init(trans);1179else1180iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);11811182iwl_pcie_rxq_restock(trans, trans_pcie->rxq);11831184spin_lock_bh(&trans_pcie->rxq->lock);1185iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);1186spin_unlock_bh(&trans_pcie->rxq->lock);11871188return 0;1189}11901191int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)1192{1193/* Set interrupt coalescing timer to default (2048 usecs) */1194iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);11951196/*1197* We don't configure the RFH.1198* Restock will be done at alive, after firmware configured the RFH.1199*/1200return _iwl_pcie_rx_init(trans);1201}12021203void iwl_pcie_rx_free(struct iwl_trans *trans)1204{1205struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1206size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);1207struct iwl_rb_allocator *rba = &trans_pcie->rba;1208int i;12091210/*1211* if rxq is NULL, it means that nothing has been allocated,1212* exit now1213*/1214if (!trans_pcie->rxq) {1215IWL_DEBUG_INFO(trans, "Free NULL rx context\n");1216return;1217}12181219cancel_work_sync(&rba->rx_alloc);12201221iwl_pcie_free_rbs_pool(trans);12221223if (trans_pcie->base_rb_stts) {1224dma_free_coherent(trans->dev,1225rb_stts_size * trans->info.num_rxqs,1226trans_pcie->base_rb_stts,1227trans_pcie->base_rb_stts_dma);1228trans_pcie->base_rb_stts = NULL;1229trans_pcie->base_rb_stts_dma = 0;1230}12311232for (i = 0; i < trans->info.num_rxqs; i++) {1233struct iwl_rxq *rxq = &trans_pcie->rxq[i];12341235iwl_pcie_free_rxq_dma(trans, rxq);12361237if (rxq->napi.poll) {1238napi_disable(&rxq->napi);1239netif_napi_del(&rxq->napi);1240}1241}1242kfree(trans_pcie->rx_pool);1243kfree(trans_pcie->global_table);1244kfree(trans_pcie->rxq);12451246if (trans_pcie->alloc_page)1247__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);1248}12491250static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,1251struct iwl_rb_allocator *rba)1252{1253spin_lock(&rba->lock);1254list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);1255spin_unlock(&rba->lock);1256}12571258/*1259* iwl_pcie_rx_reuse_rbd - Recycle used RBDs1260*1261* Called when a RBD can be reused. 
The RBD is transferred to the allocator.1262* When there are 2 empty RBDs - a request for allocation is posted1263*/1264static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,1265struct iwl_rx_mem_buffer *rxb,1266struct iwl_rxq *rxq, bool emergency)1267{1268struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1269struct iwl_rb_allocator *rba = &trans_pcie->rba;12701271/* Move the RBD to the used list, will be moved to allocator in batches1272* before claiming or posting a request*/1273list_add_tail(&rxb->list, &rxq->rx_used);12741275if (unlikely(emergency))1276return;12771278/* Count the allocator owned RBDs */1279rxq->used_count++;12801281/* If we have RX_POST_REQ_ALLOC new released rx buffers -1282* issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is1283* used for the case we failed to claim RX_CLAIM_REQ_ALLOC,1284* after but we still need to post another request.1285*/1286if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {1287/* Move the 2 RBDs to the allocator ownership.1288Allocator has another 6 from pool for the request completion*/1289iwl_pcie_rx_move_to_allocator(rxq, rba);12901291atomic_inc(&rba->req_pending);1292queue_work(rba->alloc_wq, &rba->rx_alloc);1293}1294}12951296static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,1297struct iwl_rxq *rxq,1298struct iwl_rx_mem_buffer *rxb,1299bool emergency,1300int i)1301{1302struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1303struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];1304bool page_stolen = false;1305int max_len = trans_pcie->rx_buf_bytes;1306u32 offset = 0;13071308if (WARN_ON(!rxb))1309return;13101311dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);13121313while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {1314struct iwl_rx_packet *pkt;1315bool reclaim;1316int len;1317struct iwl_rx_cmd_buffer rxcb = {1318._offset = rxb->offset + offset,1319._rx_page_order = trans_pcie->rx_page_order,1320._page = rxb->page,1321._page_stolen = false,1322.truesize = max_len,1323};13241325pkt = rxb_addr(&rxcb);13261327if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {1328IWL_DEBUG_RX(trans,1329"Q %d: RB end marker at offset %d\n",1330rxq->id, offset);1331break;1332}13331334WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>1335FH_RSCSR_RXQ_POS != rxq->id,1336"frame on invalid queue - is on %d and indicates %d\n",1337rxq->id,1338(le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>1339FH_RSCSR_RXQ_POS);13401341IWL_DEBUG_RX(trans,1342"Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",1343rxq->id, offset,1344iwl_get_cmd_string(trans,1345WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),1346pkt->hdr.group_id, pkt->hdr.cmd,1347le16_to_cpu(pkt->hdr.sequence));13481349len = iwl_rx_packet_len(pkt);1350len += sizeof(u32); /* account for status word */13511352offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);13531354/* check that what the device tells us made sense */1355if (len < sizeof(*pkt) || offset > max_len)1356break;13571358maybe_trace_iwlwifi_dev_rx(trans, pkt, len);13591360/* Reclaim a command buffer only if this packet is a response1361* to a (driver-originated) command.1362* If the packet (e.g. Rx frame) originated from uCode,1363* there is no command buffer to reclaim.1364* Ucode should set SEQ_RX_FRAME bit if ucode-originated,1365* but apparently a few don't get set; catch them here. 
*/1366reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);1367if (reclaim && !pkt->hdr.group_id) {1368int i;13691370for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {1371if (trans->conf.no_reclaim_cmds[i] ==1372pkt->hdr.cmd) {1373reclaim = false;1374break;1375}1376}1377}13781379if (rxq->id == IWL_DEFAULT_RX_QUEUE)1380iwl_op_mode_rx(trans->op_mode, &rxq->napi,1381&rxcb);1382else1383iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,1384&rxcb, rxq->id);13851386/*1387* After here, we should always check rxcb._page_stolen,1388* if it is true then one of the handlers took the page.1389*/13901391if (reclaim && txq) {1392u16 sequence = le16_to_cpu(pkt->hdr.sequence);1393int index = SEQ_TO_INDEX(sequence);1394int cmd_index = iwl_txq_get_cmd_index(txq, index);13951396kfree_sensitive(txq->entries[cmd_index].free_buf);1397txq->entries[cmd_index].free_buf = NULL;13981399/* Invoke any callbacks, transfer the buffer to caller,1400* and fire off the (possibly) blocking1401* iwl_trans_send_cmd()1402* as we reclaim the driver command queue */1403if (!rxcb._page_stolen)1404iwl_pcie_hcmd_complete(trans, &rxcb);1405else1406IWL_WARN(trans, "Claim null rxb?\n");1407}14081409page_stolen |= rxcb._page_stolen;1410if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)1411break;1412}14131414/* page was stolen from us -- free our reference */1415if (page_stolen) {1416__free_pages(rxb->page, trans_pcie->rx_page_order);1417rxb->page = NULL;1418}14191420/* Reuse the page if possible. For notification packets and1421* SKBs that fail to Rx correctly, add them back into the1422* rx_free list for reuse later. */1423if (rxb->page != NULL) {1424rxb->page_dma =1425dma_map_page(trans->dev, rxb->page, rxb->offset,1426trans_pcie->rx_buf_bytes,1427DMA_FROM_DEVICE);1428if (dma_mapping_error(trans->dev, rxb->page_dma)) {1429/*1430* free the page(s) as well to not break1431* the invariant that the items on the used1432* list have no page(s)1433*/1434__free_pages(rxb->page, trans_pcie->rx_page_order);1435rxb->page = NULL;1436iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);1437} else {1438list_add_tail(&rxb->list, &rxq->rx_free);1439rxq->free_count++;1440}1441} else1442iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);1443}14441445static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,1446struct iwl_rxq *rxq, int i,1447bool *join)1448{1449struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1450struct iwl_rx_mem_buffer *rxb;1451u16 vid;14521453BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);1454BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);14551456if (!trans->mac_cfg->mq_rx_supported) {1457rxb = rxq->queue[i];1458rxq->queue[i] = NULL;1459return rxb;1460}14611462if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {1463struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;14641465vid = le16_to_cpu(cd[i].rbid);1466*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;1467} else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {1468struct iwl_rx_completion_desc *cd = rxq->used_bd;14691470vid = le16_to_cpu(cd[i].rbid);1471*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;1472} else {1473__le32 *cd = rxq->used_bd;14741475vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */1476}14771478if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))1479goto out_err;14801481rxb = trans_pcie->global_table[vid - 1];1482if (rxb->invalid)1483goto out_err;14841485IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);14861487rxb->invalid = true;14881489return 
rxb;14901491out_err:1492WARN(1, "Invalid rxb from HW %u\n", (u32)vid);1493iwl_force_nmi(trans);1494return NULL;1495}14961497/*1498* iwl_pcie_rx_handle - Main entry function for receiving responses from fw1499*/1500static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)1501{1502struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1503struct iwl_rxq *rxq;1504u32 r, i, count = 0, handled = 0;1505bool emergency = false;15061507if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))1508return budget;15091510rxq = &trans_pcie->rxq[queue];15111512restart:1513spin_lock(&rxq->lock);1514/* uCode's read index (stored in shared DRAM) indicates the last Rx1515* buffer that the driver may process (last buffer filled by ucode). */1516r = iwl_get_closed_rb_stts(trans, rxq);1517i = rxq->read;15181519/* W/A 9000 device step A0 wrap-around bug */1520r &= (rxq->queue_size - 1);15211522/* Rx interrupt, but nothing sent from uCode */1523if (i == r)1524IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);15251526while (i != r && ++handled < budget) {1527struct iwl_rb_allocator *rba = &trans_pcie->rba;1528struct iwl_rx_mem_buffer *rxb;1529/* number of RBDs still waiting for page allocation */1530u32 rb_pending_alloc =1531atomic_read(&trans_pcie->rba.req_pending) *1532RX_CLAIM_REQ_ALLOC;1533bool join = false;15341535if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&1536!emergency)) {1537iwl_pcie_rx_move_to_allocator(rxq, rba);1538emergency = true;1539IWL_DEBUG_TPT(trans,1540"RX path is in emergency. Pending allocations %d\n",1541rb_pending_alloc);1542}15431544IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);15451546rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);1547if (!rxb)1548goto out;15491550if (unlikely(join || rxq->next_rb_is_fragment)) {1551rxq->next_rb_is_fragment = join;1552/*1553* We can only get a multi-RB in the following cases:1554* - firmware issue, sending a too big notification1555* - sniffer mode with a large A-MSDU1556* - large MTU frames (>2k)1557* since the multi-RB functionality is limited to newer1558* hardware that cannot put multiple entries into a1559* single RB.1560*1561* Right now, the higher layers aren't set up to deal1562* with that, so discard all of these.1563*/1564list_add_tail(&rxb->list, &rxq->rx_free);1565rxq->free_count++;1566} else {1567iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);1568}15691570i = (i + 1) & (rxq->queue_size - 1);15711572/*1573* If we have RX_CLAIM_REQ_ALLOC released rx buffers -1574* try to claim the pre-allocated buffers from the allocator.1575* If not ready - will try to reclaim next time.1576* There is no need to reschedule work - allocator exits only1577* on success1578*/1579if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)1580iwl_pcie_rx_allocator_get(trans, rxq);15811582if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {1583/* Add the remaining empty RBDs for allocator use */1584iwl_pcie_rx_move_to_allocator(rxq, rba);1585} else if (emergency) {1586count++;1587if (count == 8) {1588count = 0;1589if (rb_pending_alloc < rxq->queue_size / 3) {1590IWL_DEBUG_TPT(trans,1591"RX path exited emergency. 
Pending allocations %d\n",1592rb_pending_alloc);1593emergency = false;1594}15951596rxq->read = i;1597spin_unlock(&rxq->lock);1598iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);1599iwl_pcie_rxq_restock(trans, rxq);1600goto restart;1601}1602}1603}1604out:1605/* Backtrack one entry */1606rxq->read = i;1607spin_unlock(&rxq->lock);16081609/*1610* handle a case where in emergency there are some unallocated RBDs.1611* those RBDs are in the used list, but are not tracked by the queue's1612* used_count which counts allocator owned RBDs.1613* unallocated emergency RBDs must be allocated on exit, otherwise1614* when called again the function may not be in emergency mode and1615* they will be handed to the allocator with no tracking in the RBD1616* allocator counters, which will lead to them never being claimed back1617* by the queue.1618* by allocating them here, they are now in the queue free list, and1619* will be restocked by the next call of iwl_pcie_rxq_restock.1620*/1621if (unlikely(emergency && count))1622iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);16231624iwl_pcie_rxq_restock(trans, rxq);16251626return handled;1627}16281629static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)1630{1631u8 queue = entry->entry;1632struct msix_entry *entries = entry - queue;16331634return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);1635}16361637/*1638* iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw1639* This interrupt handler should be used with RSS queue only.1640*/1641irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)1642{1643struct msix_entry *entry = dev_id;1644struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);1645struct iwl_trans *trans = trans_pcie->trans;1646struct iwl_rxq *rxq;16471648trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);16491650if (WARN_ON(entry->entry >= trans->info.num_rxqs))1651return IRQ_NONE;16521653if (!trans_pcie->rxq) {1654if (net_ratelimit())1655IWL_ERR(trans,1656"[%d] Got MSI-X interrupt before we have Rx queues\n",1657entry->entry);1658return IRQ_NONE;1659}16601661rxq = &trans_pcie->rxq[entry->entry];1662lock_map_acquire(&trans->sync_cmd_lockdep_map);1663IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);16641665local_bh_disable();1666if (!napi_schedule(&rxq->napi))1667iwl_pcie_clear_irq(trans, entry->entry);1668local_bh_enable();16691670lock_map_release(&trans->sync_cmd_lockdep_map);16711672return IRQ_HANDLED;1673}16741675/*1676* iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card1677*/1678static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)1679{1680struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1681int i;16821683/* W/A for WiFi/WiMAX coex and WiMAX own the RF */1684if (trans->cfg->internal_wimax_coex &&1685!trans->mac_cfg->base->apmg_not_supported &&1686(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &1687APMS_CLK_VAL_MRB_FUNC_MODE) ||1688(iwl_read_prph(trans, APMG_PS_CTRL_REG) &1689APMG_PS_CTRL_VAL_RESET_REQ))) {1690clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);1691iwl_op_mode_wimax_active(trans->op_mode);1692wake_up(&trans_pcie->wait_command_queue);1693return;1694}16951696for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {1697if (!trans_pcie->txqs.txq[i])1698continue;1699timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);1700}17011702if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {1703u32 val = iwl_read32(trans, CSR_IPC_STATE);17041705if (val & CSR_IPC_STATE_TOP_RESET_REQ) 
{1706IWL_ERR(trans, "FW requested TOP reset for FSEQ\n");1707trans->do_top_reset = 1;1708}1709}17101711/* The STATUS_FW_ERROR bit is set in this function. This must happen1712* before we wake up the command caller, to ensure a proper cleanup. */1713iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);17141715clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);1716wake_up(&trans_pcie->wait_command_queue);1717}17181719static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)1720{1721u32 inta;17221723lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);17241725trace_iwlwifi_dev_irq(trans->dev);17261727/* Discover which interrupts are active/pending */1728inta = iwl_read32(trans, CSR_INT);17291730/* the thread will service interrupts and re-enable them */1731return inta;1732}17331734/* a device (PCI-E) page is 4096 bytes long */1735#define ICT_SHIFT 121736#define ICT_SIZE (1 << ICT_SHIFT)1737#define ICT_COUNT (ICT_SIZE / sizeof(u32))17381739/* interrupt handler using ict table, with this interrupt driver will1740* stop using INTA register to get device's interrupt, reading this register1741* is expensive, device will write interrupts in ICT dram table, increment1742* index then will fire interrupt to driver, driver will OR all ICT table1743* entries from current index up to table entry with 0 value. the result is1744* the interrupt we need to service, driver will set the entries back to 0 and1745* set index.1746*/1747static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)1748{1749struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1750u32 inta;1751u32 val = 0;1752u32 read;17531754trace_iwlwifi_dev_irq(trans->dev);17551756/* Ignore interrupt if there's nothing in NIC to service.1757* This may be due to IRQ shared with another device,1758* or due to sporadic interrupts thrown from our NIC. */1759read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);1760trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);1761if (!read)1762return 0;17631764/*1765* Collect all entries up to the first 0, starting from ict_index;1766* note we already read at ict_index.1767*/1768do {1769val |= read;1770IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",1771trans_pcie->ict_index, read);1772trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;1773trans_pcie->ict_index =1774((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));17751776read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);1777trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,1778read);1779} while (read);17801781/* We should not get this value, just ignore it. */1782if (val == 0xffffffff)1783val = 0;17841785/*1786* this is a w/a for a h/w bug. the h/w bug may cause the Rx bit1787* (bit 15 before shifting it to 31) to clear when using interrupt1788* coalescing. 
fortunately, bits 18 and 19 stay set when this happens1789* so we use them to decide on the real state of the Rx bit.1790* In order words, bit 15 is set if bit 18 or bit 19 are set.1791*/1792if (val & 0xC0000)1793val |= 0x8000;17941795inta = (0xff & val) | ((0xff00 & val) << 16);1796return inta;1797}17981799void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)1800{1801struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1802struct isr_statistics *isr_stats = &trans_pcie->isr_stats;1803bool hw_rfkill, prev, report;18041805mutex_lock(&trans_pcie->mutex);1806prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);1807hw_rfkill = iwl_is_rfkill_set(trans);1808if (hw_rfkill) {1809set_bit(STATUS_RFKILL_OPMODE, &trans->status);1810set_bit(STATUS_RFKILL_HW, &trans->status);1811}1812if (trans_pcie->opmode_down)1813report = hw_rfkill;1814else1815report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);18161817IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",1818hw_rfkill ? "disable radio" : "enable radio");18191820isr_stats->rfkill++;18211822if (prev != report)1823iwl_trans_pcie_rf_kill(trans, report, from_irq);1824mutex_unlock(&trans_pcie->mutex);18251826if (hw_rfkill) {1827if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,1828&trans->status))1829IWL_DEBUG_RF_KILL(trans,1830"Rfkill while SYNC HCMD in flight\n");1831wake_up(&trans_pcie->wait_command_queue);1832} else {1833clear_bit(STATUS_RFKILL_HW, &trans->status);1834if (trans_pcie->opmode_down)1835clear_bit(STATUS_RFKILL_OPMODE, &trans->status);1836}1837}18381839static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)1840{1841struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1842u32 state;18431844if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {1845u32 val = iwl_read32(trans, CSR_IPC_STATE);18461847state = u32_get_bits(val, CSR_IPC_STATE_RESET);1848IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state);1849} else {1850state = CSR_IPC_STATE_RESET_SW_READY;1851}18521853switch (state) {1854case CSR_IPC_STATE_RESET_SW_READY:1855if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {1856IWL_DEBUG_ISR(trans, "Reset flow completed\n");1857trans_pcie->fw_reset_state = FW_RESET_OK;1858wake_up(&trans_pcie->fw_reset_waitq);1859break;1860}1861fallthrough;1862case CSR_IPC_STATE_RESET_TOP_READY:1863if (trans_pcie->fw_reset_state == FW_RESET_TOP_REQUESTED) {1864IWL_DEBUG_ISR(trans, "TOP Reset continues\n");1865trans_pcie->fw_reset_state = FW_RESET_OK;1866wake_up(&trans_pcie->fw_reset_waitq);1867break;1868}1869fallthrough;1870case CSR_IPC_STATE_RESET_NONE:1871IWL_FW_CHECK_FAILED(trans,1872"Invalid reset interrupt (state=%d)!\n",1873state);1874break;1875case CSR_IPC_STATE_RESET_TOP_FOLLOWER:1876if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {1877/* if we were in reset, wake that up */1878IWL_INFO(trans,1879"TOP reset from BT while doing reset\n");1880trans_pcie->fw_reset_state = FW_RESET_OK;1881wake_up(&trans_pcie->fw_reset_waitq);1882} else {1883IWL_INFO(trans, "TOP reset from BT\n");1884trans->state = IWL_TRANS_NO_FW;1885iwl_trans_schedule_reset(trans,1886IWL_ERR_TYPE_TOP_RESET_BY_BT);1887}1888break;1889}1890}18911892irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)1893{1894struct iwl_trans *trans = dev_id;1895struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1896struct isr_statistics *isr_stats = &trans_pcie->isr_stats;1897u32 inta = 0;1898u32 handled = 0;1899bool polling = 
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	bool polling = false;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_bh(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}
#endif

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock_bh(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock_bh(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));
#endif

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	/* NIC fires this, but we don't use it, redundant with WAKEUP */
	if (inta & CSR_INT_BIT_SCD) {
		IWL_DEBUG_ISR(trans,
			      "Scheduler finished to transmit the frame/frames.\n");
		isr_stats->sch++;
	}

	/* Alive notification via Rx interrupt will do the real work */
	if (inta & CSR_INT_BIT_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->mac_cfg->gen2) {
			/*
			 * We can restock, since firmware configured
			 * the RFH
			 */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}

		handled |= CSR_INT_BIT_ALIVE;
	}

	if (inta & CSR_INT_BIT_RESET_DONE) {
		iwl_trans_pcie_handle_reset_interrupt(trans);
		handled |= CSR_INT_BIT_RESET_DONE;
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans, true);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
			trans_pcie->fw_reset_state = FW_RESET_ERROR;
			wake_up(&trans_pcie->fw_reset_waitq);
		} else {
			iwl_pcie_irq_handle_error(trans);
		}
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt while the shared data changes do not yet
		 * reflect it; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[0].napi);
		}
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
		/* Wake up IMR write routine, now that write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

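	/*
	 * If a NAPI poll was scheduled above, leave interrupts disabled here;
	 * the NAPI poll routine is expected to re-enable them once the Rx
	 * work has been processed.
	 */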
	if (!polling) {
		spin_lock_bh(&trans_pcie->irq_lock);
		/* only Re-enable all interrupt if disabled by irq */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		/* we are loading the firmware, enable FH_TX interrupt only */
		else if (handled & CSR_INT_BIT_FH_TX)
			iwl_enable_fw_load_int(trans);
		/* Re-enable RF_KILL if it occurred */
		else if (handled & CSR_INT_BIT_RF_KILL)
			iwl_enable_rfkill_int(trans);
		/* Re-enable the ALIVE / Rx interrupt if it occurred */
		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
			iwl_enable_fw_load_int_ctx_info(trans, false);
		spin_unlock_bh(&trans_pcie->irq_lock);
	}

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

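	/*
	 * The table base address is later programmed into CSR_DRAM_INT_TBL_REG
	 * shifted right by ICT_SHIFT, so the DMA address must be aligned to
	 * ICT_SIZE.
	 */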
	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/* Device is going up: inform it that we are using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock_bh(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

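/*
 * Threaded handler for MSI-X vectors: read and clear the FH/HW interrupt
 * cause registers, then dispatch Rx NAPI scheduling, firmware-load
 * completion, error, wakeup/Sx and RF-kill handling as indicated by the
 * causes.
 */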
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
	u32 inta_fh, inta_hw;
	bool polling = false;
	bool sw_err;

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_bh(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause again.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock_bh(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      entry->entry, inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}
#endif

	inta_fh &= trans_pcie->fh_mask;

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[0].napi);
		}
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[1].napi);
		}
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
		isr_stats->tx++;

		/* Wake up IMR routine once write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);

		/* Wake up IMR routine once write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	}

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
	else
		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
		IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
			inta_hw);
		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
			trans->request_top_reset = 1;
			iwl_op_mode_nic_error(trans->op_mode,
					      IWL_ERR_TYPE_TOP_FATAL_ERROR);
			iwl_trans_schedule_reset(trans,
						 IWL_ERR_TYPE_TOP_FATAL_ERROR);
		}
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		/* during the FW reset flow, report errors from there */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_ERROR;
			wake_up(&trans_pcie->imr_waitq);
		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
			trans_pcie->fw_reset_state = FW_RESET_ERROR;
			wake_up(&trans_pcie->fw_reset_waitq);
		} else {
			iwl_pcie_irq_handle_error(trans);
		}

		if (trans_pcie->sx_state == IWL_SX_WAITING) {
			trans_pcie->sx_state = IWL_SX_ERROR;
			wake_up(&trans_pcie->sx_waitq);
		}
	}

	/* After checking the FH register, check the HW register */
#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      entry->entry, inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}
#endif

	inta_hw &= trans_pcie->hw_mask;

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->mac_cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	/*
	 * In some rare cases when the HW is in a bad state, we may
	 * get this interrupt too early, when prph_info is still NULL.
	 * So make sure that it's not NULL to prevent crashing.
	 */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
		u32 sleep_notif =
			le32_to_cpu(trans_pcie->prph_info->sleep_notif);

		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
			IWL_DEBUG_ISR(trans,
				      "Sx interrupt: sleep notification = 0x%x\n",
				      sleep_notif);
			if (trans_pcie->sx_state == IWL_SX_WAITING) {
				trans_pcie->sx_state = IWL_SX_COMPLETE;
				wake_up(&trans_pcie->sx_waitq);
			} else {
				IWL_ERR(trans,
					"unexpected Sx interrupt (0x%x)\n",
					sleep_notif);
			}
		} else {
			/* uCode wakes up after power-down sleep */
			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
			iwl_pcie_rxq_check_wrptr(trans);
			iwl_pcie_txq_check_wrptrs(trans);

			isr_stats->wakeup++;
		}
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans, true);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
		iwl_trans_pcie_handle_reset_interrupt(trans);

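	/*
	 * MSI-X vectors are auto-masked; unmask this one unless a NAPI poll
	 * was scheduled above, in which case the poll routine is expected to
	 * clear the vector when it completes.
	 */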
	if (!polling)
		iwl_pcie_clear_irq(trans, entry->entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}