/* Path: blob/main/sys/contrib/dev/iwlwifi/pcie/gen1_2/trans.c */
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#if defined(__FreeBSD__)
#include <sys/rman.h>
#include <linux/delay.h>
#endif

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "fw/acpi.h"
#include "fw/api/tx.h" /* NOTE(review): duplicate of the include two lines up */
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "pcie/iwl-context-info-v2.h"
#include "pcie/utils.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

/*
 * iwl_trans_pcie_sw_reset - reset the entire device
 * @trans: the transport
 * @retake_ownership: re-assert PCIe ownership after the reset completed
 *
 * Bz and later use the GP_CNTRL SW_RESET bit, older families the
 * CSR_RESET register; both result in SHRD_HW_RST. Returns 0 on
 * success or the error from iwl_pcie_prepare_card_hw().
 */
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

/* Free the DMA-coherent firmware-monitor buffer, if one was allocated. */
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

/*
 * Allocate a power-of-two DMA-coherent buffer for the firmware
 * monitor, trying 2^max_power bytes first and halving on failure.
 * If a buffer already exists it is just zeroed and reused.
 */
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size) {
		memset(fw_mon->block, 0, fw_mon->size);
		return;
	}

	/* need at least 2 KiB, so stop at 11 */
	for (power = max_power; power >= 11; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

/*
 * Allocate the firmware monitor buffer. @max_power comes from the FW
 * TLV as a power-of-two exponent relative to 2 KiB; 0 means "use the
 * maximum" (2^26 = 64 MiB).
 */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power);
}

/* Indirect read of a shared (SHR) register via the HEEP control window. */
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

/* Indirect write of a shared (SHR) register via the HEEP control window. */
static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

/*
 * Select the power source in the APMG power-control register: VAUX if
 * requested and the device can generate PME from D3cold, else VMAIN.
 */
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->mac_cfg->base->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans,
				       APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

/*
 * Configure PCIe link power management: L0S is always disabled (known
 * unstable / unsupported on these devices) and the platform's ASPM-L1
 * and LTR settings are cached for later power-management decisions.
 */
void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_HAP_WAKE);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->mac_cfg->base->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->mac_cfg->base->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset.
 * Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	iwl_trans_set_bit(trans, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSISTENCE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	iwl_trans_set_bit(trans, CSR_MONITOR_CFG_REG,
			  CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

/*
 * Stop the device's bus-master DMA activity and poll (up to 100 usec)
 * for it to quiesce; logs a warning on timeout.
 */
void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bits(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				    100);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bits(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

/*
 * Power down the card. If @op_mode_leave, first inform the ME/AMT
 * firmware that the driver is leaving so it can take ownership.
 */
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->mac_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_WAKE_ME
				    | CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

/*
 * Bring the NIC up: APM init (under irq_lock), power-source selection,
 * op-mode NIC config and RX/TX queue allocation. Returns 0 or -errno.
 */
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->mac_cfg->base->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);

	/* See if we got it */
	ret = iwl_poll_bits(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
			    HW_READY_TIMEOUT);

	if (!ret)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (!ret) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		int t = 0;

		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_WAKE_ME);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (!ret) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->mac_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */

/* Program the FH service channel to DMA one firmware chunk to SRAM. */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans,
		    FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

/*
 * DMA one chunk of firmware to device SRAM and wait (up to 5 s) for
 * the FH interrupt to signal completion. Returns 0, -EIO if NIC
 * access could not be grabbed, or -ETIMEDOUT.
 */
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Copy one uCode section to the device through a DMA bounce buffer,
 * chunk by chunk, toggling the LMPM extended-address chicken bit for
 * destinations inside the extended SRAM range.
 */
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * Load all sections of one CPU for 8000-family (secured) images,
 * notifying the uCode of each loaded section via the load-status
 * register (low 16 bits for CPU1, high 16 bits for CPU2).
 */
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->mac_cfg->gen2) {
		if (cpu == 1)
			iwl_write_prph(trans,
				       UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

/*
 * Load all sections of one CPU for pre-8000 images; stops at the
 * CPU1/CPU2 or paging separator sections.
 */
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

/*
 * Apply the ini-style (DBGC1 allocation) debug-monitor destination:
 * either the SMEM path (monitor-SRAM bit) or the first DRAM fragment
 * allocated for the monitor.
 */
static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

/*
 * Program the firmware debug-data destination (TLV-based): run the
 * destination register ops, then point the monitor base/end registers
 * at the allocated buffer for external (DRAM) mode.
 */
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if
		    (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

/*
 * Load a pre-8000 firmware image (CPU1 and, for dual-CPU images,
 * CPU2), apply the debug destination and release the CPU reset.
 */
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

/*
 * Load an 8000-family (secured) firmware image: CPU1 then CPU2, with
 * the CPU reset released via the PRPH RELEASE_CPU_RESET register.
 */
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

/*
 * Sample the HW RF-kill line, update the status bits and notify the
 * op-mode if the reported state changed. Returns true if RF-kill is
 * asserted in hardware.
 */
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report, false);

	return hw_rfkill;
}

/* One interrupt cause: its mask register, bit index and IVAR offset. */
struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ?		\
			 16 :						\
			 0xffff), /* causes overflow warning */		\
	}

/* Causes mapped to the default (non-RX) MSI-X vector on all devices */
static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

/* The SW-error cause moved to a different bit on the Bz family */
static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

/* Bind each cause in @causes to MSI-X vector @val and unmask it. */
static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

/*
 * Map RX queues to MSI-X vectors: queue 0 (and, when RSS shares the
 * first vector, queue 1) go to vector 0, the rest to vectors 1..N-2.
 */
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->info.num_rxqs; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->mac_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The
IVAR table needs to be configured again after reset,1115* but if the device is disabled, we can't write to1116* prph.1117*/1118if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))1119iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);11201121/*1122* Each cause from the causes list above and the RX causes is1123* represented as a byte in the IVAR table. The first nibble1124* represents the bound interrupt vector of the cause, the second1125* represents no auto clear for this cause. This will be set if its1126* interrupt vector is bound to serve other causes.1127*/1128iwl_pcie_map_rx_causes(trans);11291130iwl_pcie_map_non_rx_causes(trans);1131}11321133static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)1134{1135struct iwl_trans *trans = trans_pcie->trans;11361137iwl_pcie_conf_msix_hw(trans_pcie);11381139if (!trans_pcie->msix_enabled)1140return;11411142trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);1143trans_pcie->fh_mask = trans_pcie->fh_init_mask;1144trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);1145trans_pcie->hw_mask = trans_pcie->hw_init_mask;1146}11471148static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)1149{1150struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);11511152lockdep_assert_held(&trans_pcie->mutex);11531154if (trans_pcie->is_down)1155return;11561157trans_pcie->is_down = true;11581159/* tell the device to stop sending interrupts */1160iwl_disable_interrupts(trans);11611162/* device going down, Stop using ICT table */1163iwl_pcie_disable_ict(trans);11641165/*1166* If a HW restart happens during firmware loading,1167* then the firmware loading might call this function1168* and later it might be called again due to the1169* restart. 
So don't process again if the device is1170* already dead.1171*/1172if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {1173IWL_DEBUG_INFO(trans,1174"DEVICE_ENABLED bit was set and is now cleared\n");1175if (!from_irq)1176iwl_pcie_synchronize_irqs(trans);1177iwl_pcie_rx_napi_sync(trans);1178iwl_pcie_tx_stop(trans);1179iwl_pcie_rx_stop(trans);11801181/* Power-down device's busmaster DMA clocks */1182if (!trans->mac_cfg->base->apmg_not_supported) {1183iwl_write_prph(trans, APMG_CLK_DIS_REG,1184APMG_CLK_VAL_DMA_CLK_RQT);1185udelay(5);1186}1187}11881189/* Make sure (redundant) we've released our request to stay awake */1190if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)1191iwl_clear_bit(trans, CSR_GP_CNTRL,1192CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);1193else1194iwl_clear_bit(trans, CSR_GP_CNTRL,1195CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);11961197/* Stop the device, and put it in low power state */1198iwl_pcie_apm_stop(trans, false);11991200/* re-take ownership to prevent other users from stealing the device */1201iwl_trans_pcie_sw_reset(trans, true);12021203/*1204* Upon stop, the IVAR table gets erased, so msi-x won't1205* work. This causes a bug in RF-KILL flows, since the interrupt1206* that enables radio won't fire on the correct irq, and the1207* driver won't be able to handle the interrupt.1208* Configure the IVAR table again after reset.1209*/1210iwl_pcie_conf_msix_hw(trans_pcie);12111212/*1213* Upon stop, the APM issues an interrupt if HW RF kill is set.1214* This is a bug in certain verions of the hardware.1215* Certain devices also keep sending HW RF kill interrupt all1216* the time, unless the interrupt is ACKed even if the interrupt1217* should be masked. 
Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

/*
 * iwl_pcie_synchronize_irqs - wait for in-flight interrupt handlers
 *
 * Synchronizes against every allocated MSI-X vector, or against the
 * single MSI/INTx line when MSI-X is not enabled.
 */
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

/*
 * iwl_trans_pcie_start_fw - load a firmware image and start it on the NIC
 * @trans: the transport
 * @fw: the firmware object (not referenced directly in this function)
 * @img: the ucode image to load
 * @run_in_rfkill: proceed with the load even if HW RF-kill is asserted
 *
 * Return: 0 on success, -EIO if the HW is not ready or was stopped,
 * -ERFKILL when RF-kill is asserted and @run_in_rfkill is false, or a
 * negative error from NIC init / ucode loading.
 */
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
			    const struct iwl_fw *fw,
			    const struct fw_img *img,
			    bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* ACK all pending interrupts before (re)enabling anything */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running.
Disable the interrupts to make sure no other1269* interrupt can be fired.1270*/1271iwl_disable_interrupts(trans);12721273/* Make sure it finished running */1274iwl_pcie_synchronize_irqs(trans);12751276mutex_lock(&trans_pcie->mutex);12771278/* If platform's RF_KILL switch is NOT set to KILL */1279hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);1280if (hw_rfkill && !run_in_rfkill) {1281ret = -ERFKILL;1282goto out;1283}12841285/* Someone called stop_device, don't try to start_fw */1286if (trans_pcie->is_down) {1287IWL_WARN(trans,1288"Can't start_fw since the HW hasn't been started\n");1289ret = -EIO;1290goto out;1291}12921293/* make sure rfkill handshake bits are cleared */1294iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);1295iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,1296CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);12971298/* clear (again), then enable host interrupts */1299iwl_write32(trans, CSR_INT, 0xFFFFFFFF);13001301ret = iwl_pcie_nic_init(trans);1302if (ret) {1303IWL_ERR(trans, "Unable to init nic\n");1304goto out;1305}13061307/*1308* Now, we load the firmware and don't want to be interrupted, even1309* by the RF-Kill interrupt (hence mask all the interrupt besides the1310* FH_TX interrupt which is needed to load the firmware). 
If the1311* RF-Kill switch is toggled, we will find out after having loaded1312* the firmware and return the proper value to the caller.1313*/1314iwl_enable_fw_load_int(trans);13151316/* really make sure rfkill handshake bits are cleared */1317iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);1318iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);13191320/* Load the given image to the HW */1321if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)1322ret = iwl_pcie_load_given_ucode_8000(trans, img);1323else1324ret = iwl_pcie_load_given_ucode(trans, img);13251326/* re-check RF-Kill state since we may have missed the interrupt */1327hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);1328if (hw_rfkill && !run_in_rfkill)1329ret = -ERFKILL;13301331out:1332mutex_unlock(&trans_pcie->mutex);1333return ret;1334}13351336void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)1337{1338iwl_pcie_reset_ict(trans);1339iwl_pcie_tx_start(trans);1340}13411342void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,1343bool was_in_rfkill)1344{1345bool hw_rfkill;13461347/*1348* Check again since the RF kill state may have changed while1349* all the interrupts were disabled, in this case we couldn't1350* receive the RF kill interrupt and update the state in the1351* op_mode.1352* Don't call the op_mode if the rkfill state hasn't changed.1353* This allows the op_mode to call stop_device from the rfkill1354* notification without endless recursion. 
Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	/* only notify the op_mode on an actual state change (see above) */
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}

/*
 * iwl_trans_pcie_stop_device - stop the device on behalf of the op_mode
 *
 * Marks the op_mode as down, stops the HW under the transport mutex, and
 * re-checks the RF-kill state that may have changed while interrupts
 * were disabled during the stop.
 */
void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans, false);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

/*
 * iwl_trans_pcie_rf_kill - report an RF-kill state change to the op_mode
 * @state: true when the radio is now disabled (killed)
 * @from_irq: true when called from interrupt context; the stop path then
 *	      skips the explicit IRQ synchronization
 *
 * If the op_mode requests it, also stops the device; this path warns and
 * does nothing for gen2 devices. Caller must hold trans_pcie->mutex.
 */
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ?
"disabled" : "enabled");1397if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&1398!WARN_ON(trans->mac_cfg->gen2))1399_iwl_trans_pcie_stop_device(trans, from_irq);1400}14011402static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,1403bool test, bool reset)1404{1405iwl_disable_interrupts(trans);14061407/*1408* in testing mode, the host stays awake and the1409* hardware won't be reset (not even partially)1410*/1411if (test)1412return;14131414iwl_pcie_disable_ict(trans);14151416iwl_pcie_synchronize_irqs(trans);14171418if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {1419iwl_clear_bit(trans, CSR_GP_CNTRL,1420CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);1421iwl_clear_bit(trans, CSR_GP_CNTRL,1422CSR_GP_CNTRL_REG_FLAG_MAC_INIT);1423} else {1424iwl_clear_bit(trans, CSR_GP_CNTRL,1425CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);1426iwl_clear_bit(trans, CSR_GP_CNTRL,1427CSR_GP_CNTRL_REG_FLAG_INIT_DONE);1428}14291430if (reset) {1431/*1432* reset TX queues -- some of their registers reset during S31433* so if we don't reset everything here the D3 image would try1434* to execute some invalid memory upon resume1435*/1436iwl_trans_pcie_tx_reset(trans);1437}14381439iwl_pcie_set_pwr(trans, true);1440}14411442static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)1443{1444struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1445int ret;14461447if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)1448return 0;14491450trans_pcie->sx_state = IWL_SX_WAITING;14511452if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)1453iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,1454suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :1455UREG_DOORBELL_TO_ISR6_RESUME);1456else1457iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,1458suspend ? 
CSR_IPC_SLEEP_CONTROL_SUSPEND :1459CSR_IPC_SLEEP_CONTROL_RESUME);14601461ret = wait_event_timeout(trans_pcie->sx_waitq,1462trans_pcie->sx_state != IWL_SX_WAITING,14632 * HZ);1464if (!ret) {1465IWL_ERR(trans, "Timeout %s D3\n",1466suspend ? "entering" : "exiting");1467ret = -ETIMEDOUT;1468} else {1469ret = 0;1470}14711472if (trans_pcie->sx_state == IWL_SX_ERROR) {1473IWL_ERR(trans, "FW error while %s D3\n",1474suspend ? "entering" : "exiting");1475ret = -EIO;1476}14771478/* Invalidate it toward next suspend or resume */1479trans_pcie->sx_state = IWL_SX_INVALID;14801481return ret;1482}14831484int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)1485{1486int ret;14871488if (!reset)1489/* Enable persistence mode to avoid reset */1490iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,1491CSR_HW_IF_CONFIG_REG_PERSISTENCE);14921493ret = iwl_pcie_d3_handshake(trans, true);1494if (ret)1495return ret;14961497iwl_pcie_d3_complete_suspend(trans, test, reset);14981499return 0;1500}15011502int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,1503enum iwl_d3_status *status,1504bool test, bool reset)1505{1506struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1507u32 val;1508int ret;15091510if (test) {1511iwl_enable_interrupts(trans);1512*status = IWL_D3_STATUS_ALIVE;1513ret = 0;1514goto out;1515}15161517if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)1518iwl_set_bit(trans, CSR_GP_CNTRL,1519CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);1520else1521iwl_set_bit(trans, CSR_GP_CNTRL,1522CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);15231524ret = iwl_finish_nic_init(trans);1525if (ret)1526return ret;15271528/*1529* Reconfigure IVAR table in case of MSIX or reset ict table in1530* MSI mode since HW reset erased it.1531* Also enables interrupts - none will happen as1532* the device doesn't know we're waking it up, only when1533* the opmode actually tells it after this call.1534*/1535iwl_pcie_conf_msix_hw(trans_pcie);1536if 
(!trans_pcie->msix_enabled)1537iwl_pcie_reset_ict(trans);1538iwl_enable_interrupts(trans);15391540iwl_pcie_set_pwr(trans, false);15411542if (!reset) {1543iwl_clear_bit(trans, CSR_GP_CNTRL,1544CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);1545} else {1546iwl_trans_pcie_tx_reset(trans);15471548ret = iwl_pcie_rx_init(trans);1549if (ret) {1550IWL_ERR(trans,1551"Failed to resume the device (RX reset)\n");1552return ret;1553}1554}15551556IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",1557iwl_read_umac_prph(trans, WFPM_GP2));15581559val = iwl_read32(trans, CSR_RESET);1560if (val & CSR_RESET_REG_FLAG_NEVO_RESET)1561*status = IWL_D3_STATUS_RESET;1562else1563*status = IWL_D3_STATUS_ALIVE;15641565out:1566if (*status == IWL_D3_STATUS_ALIVE)1567ret = iwl_pcie_d3_handshake(trans, false);1568else1569trans->state = IWL_TRANS_NO_FW;15701571return ret;1572}15731574static void1575iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,1576struct iwl_trans *trans,1577const struct iwl_mac_cfg *mac_cfg,1578struct iwl_trans_info *info)1579{1580struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);1581int max_irqs, num_irqs, i, ret;1582u16 pci_cmd;1583u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;15841585if (!mac_cfg->mq_rx_supported)1586goto enable_msi;15871588if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)1589max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;15901591max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);1592for (i = 0; i < max_irqs; i++)1593trans_pcie->msix_entries[i].entry = i;15941595num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,1596MSIX_MIN_INTERRUPT_VECTORS,1597max_irqs);1598if (num_irqs < 0) {1599IWL_DEBUG_INFO(trans,1600"Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",1601num_irqs);1602goto enable_msi;1603}1604trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;16051606IWL_DEBUG_INFO(trans,1607"MSI-X enabled. 
%d interrupt vectors were allocated\n",1608num_irqs);16091610/*1611* In case the OS provides fewer interrupts than requested, different1612* causes will share the same interrupt vector as follows:1613* One interrupt less: non rx causes shared with FBQ.1614* Two interrupts less: non rx causes shared with FBQ and RSS.1615* More than two interrupts: we will use fewer RSS queues.1616*/1617if (num_irqs <= max_irqs - 2) {1618info->num_rxqs = num_irqs + 1;1619trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |1620IWL_SHARED_IRQ_FIRST_RSS;1621} else if (num_irqs == max_irqs - 1) {1622info->num_rxqs = num_irqs;1623trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;1624} else {1625info->num_rxqs = num_irqs - 1;1626}16271628IWL_DEBUG_INFO(trans,1629"MSI-X enabled with rx queues %d, vec mask 0x%x\n",1630info->num_rxqs, trans_pcie->shared_vec_mask);16311632WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);16331634trans_pcie->alloc_vecs = num_irqs;1635trans_pcie->msix_enabled = true;1636return;16371638enable_msi:1639info->num_rxqs = 1;1640ret = pci_enable_msi(pdev);1641if (ret) {1642dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);1643/* enable rfkill interrupt: hw bug w/a */1644pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);1645if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {1646pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;1647pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);1648}1649}1650}16511652static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,1653struct iwl_trans_info *info)1654{1655#if defined(CONFIG_SMP)1656int iter_rx_q, i, ret, cpu, offset;1657struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);16581659i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;1660iter_rx_q = info->num_rxqs - 1 + i;1661offset = 1 + i;1662for (; i < iter_rx_q ; i++) {1663/*1664* Get the cpu prior to the place to search1665* (i.e. 
return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
#endif
}

/*
 * iwl_pcie_init_msix_handler - request one threaded IRQ per MSI-X vector
 *
 * The default vector (def_irq) gets the general MSI-X handler; all other
 * vectors get the RX-queue handler. Returns 0 or a negative errno; the
 * IRQs are devm-managed, so nothing is torn down here on failure.
 */
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie,
				      struct iwl_trans_info *info)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	/* spread RX-queue vectors across online CPUs */
	iwl_pcie_irq_set_affinity(trans_pcie->trans, info);

	return 0;
}

/*
 * iwl_trans_pcie_clear_persistence_bit - clear the HPM persistence bit
 *
 * Only 9000/22000 family devices need this. The bit is cleared in
 * HPM_DEBUG unless the WFPM write-protect register forbids access, in
 * which case -EPERM is returned. All other families return 0.
 */
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->mac_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int
iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans);
	if (ret < 0)
		return ret;

	/*
	 * Sequence the CR power-gating bits with settle delays, then
	 * SW-reset. NOTE(review): the exact timing/order appears to be a
	 * HW requirement for 22000 integrated devices - confirm against
	 * the HW errata before changing.
	 */
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	return iwl_trans_pcie_sw_reset(trans, true);
}

/*
 * _iwl_trans_pcie_start_hw - bring the HW up; caller holds the mutex
 *
 * Prepares the card, clears the persistence bit, SW-resets the device,
 * applies the 22000-integrated power-gating sequence, initializes APM
 * and MSI-X, then re-enables the RF-kill interrupt so the op_mode is
 * kept informed of RF-kill changes from here on.
 */
int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	err = iwl_trans_pcie_sw_reset(trans, true);
	if (err)
		return err;

	if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->mac_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

/* Locked wrapper around _iwl_trans_pcie_start_hw() */
int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

void
iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)1828{1829struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);18301831mutex_lock(&trans_pcie->mutex);18321833/* disable interrupts - don't enable HW RF kill interrupt */1834iwl_disable_interrupts(trans);18351836iwl_pcie_apm_stop(trans, true);18371838iwl_disable_interrupts(trans);18391840iwl_pcie_disable_ict(trans);18411842mutex_unlock(&trans_pcie->mutex);18431844iwl_pcie_synchronize_irqs(trans);1845}18461847#if defined(__linux__)1848void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)1849{1850writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);1851}18521853void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)1854{1855writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);1856}18571858u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)1859{1860return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);1861}1862#elif defined(__FreeBSD__)1863void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)1864{1865bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);1866}18671868void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)1869{1870bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);1871}18721873u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)1874{1875u32 v;18761877v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs);1878return (v);1879}1880#endif18811882static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)1883{1884if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)1885return 0x00FFFFFF;1886else1887return 0x000FFFFF;1888}18891890u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)1891{1892u32 mask = iwl_trans_pcie_prph_msk(trans);18931894iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,1895((reg & mask) | (3 << 24)));1896return iwl_trans_pcie_read32(trans, 
HBUS_TARG_PRPH_RDAT);
}

/* Indirect periphery write: latch address into WADDR, then write WDAT */
void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

/*
 * iwl_trans_pcie_op_mode_enter - (re)configure RX buffer geometry
 *
 * Frees any previously allocated RX buffers first, since the op_mode
 * may have selected a different RX buffer size this time around.
 */
void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans->conf.rx_buf_size);
}

/*
 * iwl_trans_pcie_free_pnvm_dram_regions - free PNVM DRAM payloads and
 *	their descriptor block
 * @dram_regions: regions to free; n_regions is reset to 0 and the
 *	descriptor is zeroed on return
 * @dev: device the DMA memory was allocated for
 */
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
					   struct device *dev)
{
	u8 i;
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;

	/* free DRAM payloads */
	for (i = 0; i < dram_regions->n_regions; i++) {
		dma_free_coherent(dev, dram_regions->drams[i].size,
				  dram_regions->drams[i].block,
				  dram_regions->drams[i].physical);
	}
	dram_regions->n_regions = 0;

	/* free DRAM addresses array */
	if (desc_dram->block) {
		dma_free_coherent(dev, desc_dram->size,
				  desc_dram->block,
				  desc_dram->physical);
	}
	memset(desc_dram, 0, sizeof(*desc_dram));
}

/* Free the "invalid TX command" DMA buffer allocated at init */
static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);
}

/*
 * iwl_pcie_alloc_invalid_tx_cmd - allocate a DMA-able dummy TX command
 *
 * Allocates a small DMA buffer and fills it with a wide command header
 * marked INVALID_WR_PTR_CMD (DEBUG_GROUP, sequence 0xffff, zero length);
 * presumably used to point unused TX slots at a recognizable no-op -
 * confirm against the queue setup code.
 */
static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_cmd_header_wide bad_cmd = {
		.cmd = INVALID_WR_PTR_CMD,
		.group_id = DEBUG_GROUP,
		.sequence = cpu_to_le16(0xffff),
		.length = cpu_to_le16(0),
		.version = 0,
	};
	int ret;

	ret =
iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,
				     sizeof(bad_cmd));
	if (ret)
		return ret;
	memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
	return 0;
}

/*
 * iwl_trans_pcie_free - release all transport resources
 *
 * Quiesces IRQs, frees the TX path (gen2 or gen1 variant) and RX path,
 * destroys the RB-allocator workqueue, drops MSI-X affinity hints (or
 * frees the ICT table in MSI mode), frees the NAPI netdev, the invalid
 * TX command buffer, the FW monitor, both PNVM DRAM region sets and,
 * under CONFIG_INET, the per-CPU TSO header pages - then frees the
 * transport itself.
 */
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->mac_cfg->gen2)
		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	free_netdev(trans_pcie->napi_dev);

	iwl_pcie_free_invalid_tx_cmd(trans);

	iwl_pcie_free_fw_monitor(trans);

	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
					      trans->dev);
	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
					      trans->dev);

	mutex_destroy(&trans_pcie->mutex);

#ifdef CONFIG_INET
	if (trans_pcie->txqs.tso_hdr_page) {
		for_each_possible_cpu(i) {
			struct iwl_tso_hdr_page *p =
				per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);

			if (p && p->page)
				__free_page(p->page);
		}

		free_percpu(trans_pcie->txqs.tso_hdr_page);
	}
#endif

	iwl_trans_free(trans);
}

/*
 * iwl_trans_pcie_call_prod_reset_dsm - invoke the product-reset ACPI _DSM
 * @cmd: internal product-reset command
 * @value: command argument, packed with @cmd into the _DSM buffer
 *
 * Returns the _DSM result object (caller frees with ACPI_FREE), or an
 * ERR_PTR when ACPI or the DSM function is unavailable.
 */
static union acpi_object *
iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value)
{
#ifdef CONFIG_ACPI
	struct iwl_dsm_internal_product_reset_cmd pldr_arg = {
		.cmd = cmd,
		.value = value,
	};
	union acpi_object arg = {
		.buffer.type = ACPI_TYPE_BUFFER,
		.buffer.length = sizeof(pldr_arg),
		.buffer.pointer = (void *)&pldr_arg,
	};
	static const guid_t dsm_guid =
GUID_INIT(0x7266172C, 0x220B, 0x4B29,20450x81, 0x4F, 0x75, 0xE4,20460xDD, 0x26, 0xB5, 0xFD);20472048if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV,2049DSM_INTERNAL_FUNC_PRODUCT_RESET))2050return ERR_PTR(-ENODEV);20512052return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV,2053DSM_INTERNAL_FUNC_PRODUCT_RESET,2054&arg, &dsm_guid);2055#else2056return ERR_PTR(-EOPNOTSUPP);2057#endif2058}20592060void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)2061{2062union acpi_object *res;20632064res = iwl_trans_pcie_call_prod_reset_dsm(pdev,2065DSM_INTERNAL_PLDR_CMD_GET_MODE,20660);2067if (IS_ERR(res))2068return;20692070if (res->type != ACPI_TYPE_INTEGER)2071IWL_ERR_DEV(&pdev->dev,2072"unexpected return type from product reset DSM\n");2073else2074IWL_DEBUG_DEV_POWER(&pdev->dev,2075"product reset mode is 0x%llx\n",2076res->integer.value);20772078ACPI_FREE(res);2079}20802081static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable,2082bool integrated)2083{2084union acpi_object *res;2085u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0;20862087if (!integrated)2088mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |2089DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;20902091res = iwl_trans_pcie_call_prod_reset_dsm(pdev,2092DSM_INTERNAL_PLDR_CMD_SET_MODE,2093mode);2094if (IS_ERR(res)) {2095if (enable)2096IWL_ERR_DEV(&pdev->dev,2097"ACPI _DSM not available (%d), cannot do product reset\n",2098(int)PTR_ERR(res));2099return;2100}21012102ACPI_FREE(res);2103IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n",2104enable ? 
"En" : "Dis");2105iwl_trans_pcie_check_product_reset_mode(pdev);2106}21072108void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)2109{2110union acpi_object *res;21112112res = iwl_trans_pcie_call_prod_reset_dsm(pdev,2113DSM_INTERNAL_PLDR_CMD_GET_STATUS,21140);2115if (IS_ERR(res))2116return;21172118if (res->type != ACPI_TYPE_INTEGER)2119IWL_ERR_DEV(&pdev->dev,2120"unexpected return type from product reset DSM\n");2121else2122IWL_DEBUG_DEV_POWER(&pdev->dev,2123"product reset status is 0x%llx\n",2124res->integer.value);21252126ACPI_FREE(res);2127}21282129static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)2130{2131#ifdef CONFIG_ACPI2132struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };2133union acpi_object *p, *ref;2134acpi_status status;2135int ret = -EINVAL;21362137status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),2138"_PRR", NULL, &buffer);2139if (ACPI_FAILURE(status)) {2140IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");2141goto out;2142}2143p = buffer.pointer;21442145if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {2146pci_err(pdev, "Bad _PRR return type\n");2147goto out;2148}21492150ref = &p->package.elements[0];2151if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {2152pci_err(pdev, "_PRR wasn't a reference\n");2153goto out;2154}21552156status = acpi_evaluate_object(ref->reference.handle,2157"_RST", NULL, NULL);2158if (ACPI_FAILURE(status)) {2159pci_err(pdev,2160"Failed to call _RST on object returned by _PRR (%d)\n",2161status);2162goto out;2163}2164ret = 0;2165out:2166kfree(buffer.pointer);2167if (!ret) {2168IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n");2169return;2170}2171IWL_DEBUG_DEV_POWER(&pdev->dev,2172"No BIOS support, using pci_reset_function()\n");2173#endif2174pci_reset_function(pdev);2175}21762177struct iwl_trans_pcie_removal {2178struct pci_dev *pdev;2179struct work_struct work;2180enum iwl_reset_mode mode;2181bool integrated;2182};21832184static void 
iwl_trans_pcie_removal_wk(struct work_struct *wk)2185{2186struct iwl_trans_pcie_removal *removal =2187container_of(wk, struct iwl_trans_pcie_removal, work);2188struct pci_dev *pdev = removal->pdev;2189static char *prop[] = {"EVENT=INACCESSIBLE", NULL};2190struct pci_bus *bus;21912192pci_lock_rescan_remove();21932194bus = pdev->bus;2195/* in this case, something else already removed the device */2196if (!bus)2197goto out;21982199kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);22002201if (removal->mode == IWL_RESET_MODE_PROD_RESET) {2202struct pci_dev *bt = NULL;22032204if (!removal->integrated) {2205/* discrete devices have WiFi/BT at function 0/1 */2206int slot = PCI_SLOT(pdev->devfn);2207int func = PCI_FUNC(pdev->devfn);22082209if (func == 0)2210bt = pci_get_slot(bus, PCI_DEVFN(slot, 1));2211else2212pci_info(pdev, "Unexpected function %d\n",2213func);2214} else {2215/* on integrated we have to look up by ID (same bus) */2216static const struct pci_device_id bt_device_ids[] = {2217#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) }2218BT_DEV(0xA876), /* LNL */2219BT_DEV(0xE476), /* PTL-P */2220BT_DEV(0xE376), /* PTL-H */2221BT_DEV(0xD346), /* NVL-H */2222BT_DEV(0x6E74), /* NVL-S */2223BT_DEV(0x4D76), /* WCL */2224BT_DEV(0xD246), /* RZL-H */2225BT_DEV(0x6C46), /* RZL-M */2226{}2227};2228struct pci_dev *tmp = NULL;22292230for_each_pci_dev(tmp) {2231if (tmp->bus != bus)2232continue;22332234if (pci_match_id(bt_device_ids, tmp)) {2235bt = tmp;2236break;2237}2238}2239}22402241if (bt) {2242pci_info(bt, "Removal by WiFi due to product reset\n");2243pci_stop_and_remove_bus_device(bt);2244pci_dev_put(bt);2245}2246}22472248iwl_trans_pcie_set_product_reset(pdev,2249removal->mode ==2250IWL_RESET_MODE_PROD_RESET,2251removal->integrated);2252if (removal->mode >= IWL_RESET_MODE_FUNC_RESET)2253iwl_trans_pcie_call_reset(pdev);22542255pci_stop_and_remove_bus_device(pdev);2256pci_dev_put(pdev);22572258if (removal->mode >= IWL_RESET_MODE_RESCAN) {2259#if 
defined(__linux__)2260if (bus->parent)2261bus = bus->parent;2262#elif defined(__FreeBSD__)2263/* XXX-TODO */2264#endif2265pci_rescan_bus(bus);2266}22672268out:2269pci_unlock_rescan_remove();22702271kfree(removal);2272module_put(THIS_MODULE);2273}22742275void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)2276{2277struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2278struct iwl_trans_pcie_removal *removal;2279char _msg = 0, *msg = &_msg;22802281if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||2282mode == IWL_RESET_MODE_BACKOFF))2283return;22842285if (test_bit(STATUS_TRANS_DEAD, &trans->status))2286return;22872288if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {2289mode = IWL_RESET_MODE_FUNC_RESET;2290if (trans_pcie->me_present < 0)2291msg = " instead of product reset as ME may be present";2292else2293msg = " instead of product reset as ME is present";2294}22952296IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg);22972298iwl_pcie_dump_csr(trans);22992300/*2301* get a module reference to avoid doing this2302* while unloading anyway and to avoid2303* scheduling a work with code that's being2304* removed.2305*/2306if (!try_module_get(THIS_MODULE)) {2307IWL_ERR(trans,2308"Module is being unloaded - abort\n");2309return;2310}23112312removal = kzalloc(sizeof(*removal), GFP_ATOMIC);2313if (!removal) {2314module_put(THIS_MODULE);2315return;2316}2317/*2318* we don't need to clear this flag, because2319* the trans will be freed and reallocated.2320*/2321set_bit(STATUS_TRANS_DEAD, &trans->status);23222323removal->pdev = to_pci_dev(trans->dev);2324removal->mode = mode;2325removal->integrated = trans->mac_cfg->integrated;2326INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);2327pci_dev_get(removal->pdev);2328schedule_work(&removal->work);2329}2330EXPORT_SYMBOL(iwl_trans_pcie_reset);23312332/*2333* This version doesn't disable BHs but rather assumes they're2334* already disabled.2335*/2336bool 
__iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)2337{2338int ret;2339struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2340u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;2341u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |2342CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;2343u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;23442345if (test_bit(STATUS_TRANS_DEAD, &trans->status))2346return false;23472348spin_lock(&trans_pcie->reg_lock);23492350if (trans_pcie->cmd_hold_nic_awake)2351goto out;23522353if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {2354write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;2355mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;2356poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;2357}23582359/* this bit wakes up the NIC */2360iwl_trans_set_bit(trans, CSR_GP_CNTRL, write);2361if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)2362udelay(2);23632364/*2365* These bits say the device is running, and should keep running for2366* at least a short while (at least as long as MAC_ACCESS_REQ stays 1),2367* but they do not indicate that embedded SRAM is restored yet;2368* HW with volatile SRAM must save/restore contents to/from2369* host DRAM when sleeping/waking for power-saving.2370* Each direction takes approximately 1/4 millisecond; with this2371* overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a2372* series of register accesses are expected (e.g. reading Event Log),2373* to keep device from sleeping.2374*2375* CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that2376* SRAM is okay/restored. We don't check that here because this call2377* is just for hardware register access; but GP1 MAC_SLEEP2378* check is a good idea before accessing the SRAM of HW with2379* volatile SRAM (e.g. 
reading Event Log).2380*2381* 5000 series and later (including 1000 series) have non-volatile SRAM,2382* and do not save/restore SRAM when power cycling.2383*/2384ret = iwl_poll_bits_mask(trans, CSR_GP_CNTRL, poll, mask, 15000);2385if (unlikely(ret)) {2386u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);23872388if (silent) {2389spin_unlock(&trans_pcie->reg_lock);2390return false;2391}23922393WARN_ONCE(1,2394"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",2395cntrl);23962397iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);23982399if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)2400iwl_trans_pcie_reset(trans,2401IWL_RESET_MODE_REMOVE_ONLY);2402else2403iwl_write32(trans, CSR_RESET,2404CSR_RESET_REG_FLAG_FORCE_NMI);24052406spin_unlock(&trans_pcie->reg_lock);2407return false;2408}24092410out:2411/*2412* Fool sparse by faking we release the lock - sparse will2413* track nic_access anyway.2414*/2415__release(&trans_pcie->reg_lock);2416return true;2417}24182419bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)2420{2421bool ret;24222423local_bh_disable();2424ret = __iwl_trans_pcie_grab_nic_access(trans, false);2425if (ret) {2426/* keep BHs disabled until iwl_trans_pcie_release_nic_access */2427return ret;2428}2429local_bh_enable();2430return false;2431}24322433void __releases(nic_access_nobh)2434iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)2435{2436struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);24372438lockdep_assert_held(&trans_pcie->reg_lock);24392440/*2441* Fool sparse by faking we acquiring the lock - sparse will2442* track nic_access anyway.2443*/2444__acquire(&trans_pcie->reg_lock);24452446if (trans_pcie->cmd_hold_nic_awake)2447goto out;2448if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)2449iwl_trans_clear_bit(trans, CSR_GP_CNTRL,2450CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);2451else2452iwl_trans_clear_bit(trans, CSR_GP_CNTRL,2453CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);2454/*2455* Above we read 
the CSR_GP_CNTRL register, which will flush2456* any previous writes, but we need the write that clears the2457* MAC_ACCESS_REQ bit to be performed before any other writes2458* scheduled on different CPUs (after we drop reg_lock).2459*/2460out:2461__release(nic_access_nobh);2462spin_unlock_bh(&trans_pcie->reg_lock);2463}24642465int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,2466void *buf, int dwords)2467{2468#define IWL_MAX_HW_ERRS 52469unsigned int num_consec_hw_errors = 0;2470int offs = 0;2471u32 *vals = buf;24722473while (offs < dwords) {2474/* limit the time we spin here under lock to 1/2s */2475unsigned long end = jiffies + HZ / 2;2476bool resched = false;24772478if (iwl_trans_grab_nic_access(trans)) {2479iwl_write32(trans, HBUS_TARG_MEM_RADDR,2480addr + 4 * offs);24812482while (offs < dwords) {2483vals[offs] = iwl_read32(trans,2484HBUS_TARG_MEM_RDAT);24852486if (iwl_trans_is_hw_error_value(vals[offs]))2487num_consec_hw_errors++;2488else2489num_consec_hw_errors = 0;24902491if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {2492iwl_trans_release_nic_access(trans);2493return -EIO;2494}24952496offs++;24972498if (time_after(jiffies, end)) {2499resched = true;2500break;2501}2502}2503iwl_trans_release_nic_access(trans);25042505if (resched)2506cond_resched();2507} else {2508return -EBUSY;2509}2510}25112512return 0;2513}25142515int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,2516u32 *val)2517{2518return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,2519ofs, val);2520}25212522#define IWL_FLUSH_WAIT_MS 200025232524int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,2525struct iwl_trans_rxq_dma_data *data)2526{2527struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);25282529if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)2530return -EINVAL;25312532data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;2533data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;2534data->ur_bd_cb = 
trans_pcie->rxq[queue].used_bd_dma;2535data->fr_bd_wid = 0;25362537return 0;2538}25392540int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)2541{2542struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2543struct iwl_txq *txq;2544unsigned long now = jiffies;2545bool overflow_tx;2546u8 wr_ptr;25472548/* Make sure the NIC is still alive in the bus */2549if (test_bit(STATUS_TRANS_DEAD, &trans->status))2550return -ENODEV;25512552if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))2553return -EINVAL;25542555IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);2556txq = trans_pcie->txqs.txq[txq_idx];25572558spin_lock_bh(&txq->lock);2559overflow_tx = txq->overflow_tx ||2560!skb_queue_empty(&txq->overflow_q);2561spin_unlock_bh(&txq->lock);25622563wr_ptr = READ_ONCE(txq->write_ptr);25642565while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||2566overflow_tx) &&2567!time_after(jiffies,2568now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {2569u8 write_ptr = READ_ONCE(txq->write_ptr);25702571/*2572* If write pointer moved during the wait, warn only2573* if the TX came from op mode. 
In case TX came from2574* trans layer (overflow TX) don't warn.2575*/2576if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,2577"WR pointer moved while flushing %d -> %d\n",2578wr_ptr, write_ptr))2579return -ETIMEDOUT;2580wr_ptr = write_ptr;25812582usleep_range(1000, 2000);25832584spin_lock_bh(&txq->lock);2585overflow_tx = txq->overflow_tx ||2586!skb_queue_empty(&txq->overflow_q);2587spin_unlock_bh(&txq->lock);2588}25892590if (txq->read_ptr != txq->write_ptr) {2591IWL_ERR(trans,2592"fail to flush all tx fifo queues Q %d\n", txq_idx);2593iwl_txq_log_scd_error(trans, txq);2594return -ETIMEDOUT;2595}25962597IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);25982599return 0;2600}26012602int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)2603{2604struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2605int cnt;2606int ret = 0;26072608/* waiting for all the tx frames complete might take a while */2609for (cnt = 0;2610cnt < trans->mac_cfg->base->num_of_queues;2611cnt++) {26122613if (cnt == trans->conf.cmd_queue)2614continue;2615if (!test_bit(cnt, trans_pcie->txqs.queue_used))2616continue;2617if (!(BIT(cnt) & txq_bm))2618continue;26192620ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);2621if (ret)2622break;2623}26242625return ret;2626}26272628void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,2629u32 mask, u32 value)2630{2631struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);26322633spin_lock_bh(&trans_pcie->reg_lock);2634_iwl_trans_set_bits_mask(trans, reg, mask, value);2635spin_unlock_bh(&trans_pcie->reg_lock);2636}26372638static const char *get_csr_string(int cmd)2639{2640#define IWL_CMD(x) case x: return #x2641switch (cmd) 
{2642IWL_CMD(CSR_HW_IF_CONFIG_REG);2643IWL_CMD(CSR_INT_COALESCING);2644IWL_CMD(CSR_INT);2645IWL_CMD(CSR_INT_MASK);2646IWL_CMD(CSR_FH_INT_STATUS);2647IWL_CMD(CSR_GPIO_IN);2648IWL_CMD(CSR_RESET);2649IWL_CMD(CSR_GP_CNTRL);2650IWL_CMD(CSR_HW_REV);2651IWL_CMD(CSR_EEPROM_REG);2652IWL_CMD(CSR_EEPROM_GP);2653IWL_CMD(CSR_OTP_GP_REG);2654IWL_CMD(CSR_GIO_REG);2655IWL_CMD(CSR_GP_UCODE_REG);2656IWL_CMD(CSR_GP_DRIVER_REG);2657IWL_CMD(CSR_UCODE_DRV_GP1);2658IWL_CMD(CSR_UCODE_DRV_GP2);2659IWL_CMD(CSR_LED_REG);2660IWL_CMD(CSR_DRAM_INT_TBL_REG);2661IWL_CMD(CSR_GIO_CHICKEN_BITS);2662IWL_CMD(CSR_ANA_PLL_CFG);2663IWL_CMD(CSR_HW_REV_WA_REG);2664IWL_CMD(CSR_MONITOR_STATUS_REG);2665IWL_CMD(CSR_DBG_HPET_MEM_REG);2666default:2667return "UNKNOWN";2668}2669#undef IWL_CMD2670}26712672void iwl_pcie_dump_csr(struct iwl_trans *trans)2673{2674int i;2675static const u32 csr_tbl[] = {2676CSR_HW_IF_CONFIG_REG,2677CSR_INT_COALESCING,2678CSR_INT,2679CSR_INT_MASK,2680CSR_FH_INT_STATUS,2681CSR_GPIO_IN,2682CSR_RESET,2683CSR_GP_CNTRL,2684CSR_HW_REV,2685CSR_EEPROM_REG,2686CSR_EEPROM_GP,2687CSR_OTP_GP_REG,2688CSR_GIO_REG,2689CSR_GP_UCODE_REG,2690CSR_GP_DRIVER_REG,2691CSR_UCODE_DRV_GP1,2692CSR_UCODE_DRV_GP2,2693CSR_LED_REG,2694CSR_DRAM_INT_TBL_REG,2695CSR_GIO_CHICKEN_BITS,2696CSR_ANA_PLL_CFG,2697CSR_MONITOR_STATUS_REG,2698CSR_HW_REV_WA_REG,2699CSR_DBG_HPET_MEM_REG2700};2701IWL_ERR(trans, "CSR values:\n");2702IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "2703"CSR_INT_PERIODIC_REG)\n");2704for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {2705IWL_ERR(trans, " %25s: 0X%08x\n",2706get_csr_string(csr_tbl[i]),2707iwl_read32(trans, csr_tbl[i]));2708}2709}27102711#ifdef CONFIG_IWLWIFI_DEBUGFS2712/* create and remove of files */2713#define DEBUGFS_ADD_FILE(name, parent, mode) do { \2714debugfs_create_file(#name, mode, parent, trans, \2715&iwl_dbgfs_##name##_ops); \2716} while (0)27172718/* file operation */2719#define DEBUGFS_READ_FILE_OPS(name) \2720static const struct file_operations iwl_dbgfs_##name##_ops = { 
\2721.read = iwl_dbgfs_##name##_read, \2722.open = simple_open, \2723.llseek = generic_file_llseek, \2724};27252726#define DEBUGFS_WRITE_FILE_OPS(name) \2727static const struct file_operations iwl_dbgfs_##name##_ops = { \2728.write = iwl_dbgfs_##name##_write, \2729.open = simple_open, \2730.llseek = generic_file_llseek, \2731};27322733#define DEBUGFS_READ_WRITE_FILE_OPS(name) \2734static const struct file_operations iwl_dbgfs_##name##_ops = { \2735.write = iwl_dbgfs_##name##_write, \2736.read = iwl_dbgfs_##name##_read, \2737.open = simple_open, \2738.llseek = generic_file_llseek, \2739};27402741struct iwl_dbgfs_tx_queue_priv {2742struct iwl_trans *trans;2743};27442745struct iwl_dbgfs_tx_queue_state {2746loff_t pos;2747};27482749static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)2750{2751struct iwl_dbgfs_tx_queue_priv *priv = seq->private;2752struct iwl_dbgfs_tx_queue_state *state;27532754if (*pos >= priv->trans->mac_cfg->base->num_of_queues)2755return NULL;27562757state = kmalloc(sizeof(*state), GFP_KERNEL);2758if (!state)2759return NULL;2760state->pos = *pos;2761return state;2762}27632764static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,2765void *v, loff_t *pos)2766{2767struct iwl_dbgfs_tx_queue_priv *priv = seq->private;2768struct iwl_dbgfs_tx_queue_state *state = v;27692770*pos = ++state->pos;27712772if (*pos >= priv->trans->mac_cfg->base->num_of_queues)2773return NULL;27742775return state;2776}27772778static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)2779{2780kfree(v);2781}27822783static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)2784{2785struct iwl_dbgfs_tx_queue_priv *priv = seq->private;2786struct iwl_dbgfs_tx_queue_state *state = v;2787struct iwl_trans *trans = priv->trans;2788struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2789struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];27902791seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",2792(unsigned 
int)state->pos,2793!!test_bit(state->pos, trans_pcie->txqs.queue_used),2794!!test_bit(state->pos, trans_pcie->txqs.queue_stopped));2795if (txq)2796seq_printf(seq,2797"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",2798txq->read_ptr, txq->write_ptr,2799txq->need_update, txq->frozen,2800txq->n_window, txq->ampdu);2801else2802seq_puts(seq, "(unallocated)");28032804if (state->pos == trans->conf.cmd_queue)2805seq_puts(seq, " (HCMD)");2806seq_puts(seq, "\n");28072808return 0;2809}28102811static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {2812.start = iwl_dbgfs_tx_queue_seq_start,2813.next = iwl_dbgfs_tx_queue_seq_next,2814.stop = iwl_dbgfs_tx_queue_seq_stop,2815.show = iwl_dbgfs_tx_queue_seq_show,2816};28172818static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)2819{2820struct iwl_dbgfs_tx_queue_priv *priv;28212822priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,2823sizeof(*priv));28242825if (!priv)2826return -ENOMEM;28272828priv->trans = inode->i_private;2829return 0;2830}28312832static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,2833char __user *user_buf,2834size_t count, loff_t *ppos)2835{2836struct iwl_trans *trans = file->private_data;2837struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2838char *buf;2839int pos = 0, i, ret;2840size_t bufsz;28412842bufsz = sizeof(char) * 121 * trans->info.num_rxqs;28432844if (!trans_pcie->rxq)2845return -EAGAIN;28462847buf = kzalloc(bufsz, GFP_KERNEL);2848if (!buf)2849return -ENOMEM;28502851for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {2852struct iwl_rxq *rxq = &trans_pcie->rxq[i];28532854spin_lock_bh(&rxq->lock);28552856pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",2857i);2858pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",2859rxq->read);2860pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",2861rxq->write);2862pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",2863rxq->write_actual);2864pos += 
scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",2865rxq->need_update);2866pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",2867rxq->free_count);2868if (rxq->rb_stts) {2869u32 r = iwl_get_closed_rb_stts(trans, rxq);2870pos += scnprintf(buf + pos, bufsz - pos,2871"\tclosed_rb_num: %u\n", r);2872} else {2873pos += scnprintf(buf + pos, bufsz - pos,2874"\tclosed_rb_num: Not Allocated\n");2875}2876spin_unlock_bh(&rxq->lock);2877}2878ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);2879kfree(buf);28802881return ret;2882}28832884static ssize_t iwl_dbgfs_interrupt_read(struct file *file,2885char __user *user_buf,2886size_t count, loff_t *ppos)2887{2888struct iwl_trans *trans = file->private_data;2889struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2890struct isr_statistics *isr_stats = &trans_pcie->isr_stats;28912892int pos = 0;2893char *buf;2894int bufsz = 24 * 64; /* 24 items * 64 char per item */2895ssize_t ret;28962897buf = kzalloc(bufsz, GFP_KERNEL);2898if (!buf)2899return -ENOMEM;29002901pos += scnprintf(buf + pos, bufsz - pos,2902"Interrupt Statistics Report:\n");29032904pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",2905isr_stats->hw);2906pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",2907isr_stats->sw);2908if (isr_stats->sw || isr_stats->hw) {2909pos += scnprintf(buf + pos, bufsz - pos,2910"\tLast Restarting Code: 0x%X\n",2911isr_stats->err_code);2912}2913#ifdef CONFIG_IWLWIFI_DEBUG2914pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",2915isr_stats->sch);2916pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",2917isr_stats->alive);2918#endif2919pos += scnprintf(buf + pos, bufsz - pos,2920"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);29212922pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",2923isr_stats->ctkill);29242925pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t 
%u\n",2926isr_stats->wakeup);29272928pos += scnprintf(buf + pos, bufsz - pos,2929"Rx command responses:\t\t %u\n", isr_stats->rx);29302931pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",2932isr_stats->tx);29332934pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",2935isr_stats->unhandled);29362937ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);2938kfree(buf);2939return ret;2940}29412942static ssize_t iwl_dbgfs_interrupt_write(struct file *file,2943const char __user *user_buf,2944size_t count, loff_t *ppos)2945{2946struct iwl_trans *trans = file->private_data;2947struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2948struct isr_statistics *isr_stats = &trans_pcie->isr_stats;2949u32 reset_flag;2950int ret;29512952ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);2953if (ret)2954return ret;2955if (reset_flag == 0)2956memset(isr_stats, 0, sizeof(*isr_stats));29572958return count;2959}29602961static ssize_t iwl_dbgfs_csr_write(struct file *file,2962const char __user *user_buf,2963size_t count, loff_t *ppos)2964{2965struct iwl_trans *trans = file->private_data;29662967iwl_pcie_dump_csr(trans);29682969return count;2970}29712972static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,2973char __user *user_buf,2974size_t count, loff_t *ppos)2975{2976struct iwl_trans *trans = file->private_data;2977char *buf = NULL;2978ssize_t ret;29792980ret = iwl_dump_fh(trans, &buf);2981if (ret < 0)2982return ret;2983if (!buf)2984return -EINVAL;2985ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);2986kfree(buf);2987return ret;2988}29892990static ssize_t iwl_dbgfs_rfkill_read(struct file *file,2991char __user *user_buf,2992size_t count, loff_t *ppos)2993{2994struct iwl_trans *trans = file->private_data;2995struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);2996char buf[100];2997int pos;29982999pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: 
%d\n",3000trans_pcie->debug_rfkill,3001!(iwl_read32(trans, CSR_GP_CNTRL) &3002CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));30033004return simple_read_from_buffer(user_buf, count, ppos, buf, pos);3005}30063007static ssize_t iwl_dbgfs_rfkill_write(struct file *file,3008const char __user *user_buf,3009size_t count, loff_t *ppos)3010{3011struct iwl_trans *trans = file->private_data;3012struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);3013bool new_value;3014int ret;30153016ret = kstrtobool_from_user(user_buf, count, &new_value);3017if (ret)3018return ret;3019if (new_value == trans_pcie->debug_rfkill)3020return count;3021IWL_WARN(trans, "changing debug rfkill %d->%d\n",3022trans_pcie->debug_rfkill, new_value);3023trans_pcie->debug_rfkill = new_value;3024iwl_pcie_handle_rfkill_irq(trans, false);30253026return count;3027}30283029static int iwl_dbgfs_monitor_data_open(struct inode *inode,3030struct file *file)3031{3032struct iwl_trans *trans = inode->i_private;3033struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);30343035if (!trans->dbg.dest_tlv ||3036trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {3037IWL_ERR(trans, "Debug destination is not set to DRAM\n");3038return -ENOENT;3039}30403041if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)3042return -EBUSY;30433044trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;3045return simple_open(inode, file);3046}30473048static int iwl_dbgfs_monitor_data_release(struct inode *inode,3049struct file *file)3050{3051struct iwl_trans_pcie *trans_pcie =3052IWL_TRANS_GET_PCIE_TRANS(inode->i_private);30533054if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)3055trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;3056return 0;3057}30583059static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,3060void *buf, ssize_t *size,3061ssize_t *bytes_copied)3062{3063ssize_t buf_size_left = count - *bytes_copied;30643065buf_size_left = buf_size_left - 
(buf_size_left % sizeof(u32));3066if (*size > buf_size_left)3067*size = buf_size_left;30683069*size -= copy_to_user(user_buf, buf, *size);3070*bytes_copied += *size;30713072if (buf_size_left == *size)3073return true;3074return false;3075}30763077static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,3078char __user *user_buf,3079size_t count, loff_t *ppos)3080{3081struct iwl_trans *trans = file->private_data;3082struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);3083u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;3084struct cont_rec *data = &trans_pcie->fw_mon_data;3085u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;3086ssize_t size, bytes_copied = 0;3087bool b_full;30883089if (trans->dbg.dest_tlv) {3090write_ptr_addr =3091le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);3092wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);3093} else {3094write_ptr_addr = MON_BUFF_WRPTR;3095wrap_cnt_addr = MON_BUFF_CYCLE_CNT;3096}30973098if (unlikely(!trans->dbg.rec_on))3099return 0;31003101mutex_lock(&data->mutex);3102if (data->state ==3103IWL_FW_MON_DBGFS_STATE_DISABLED) {3104mutex_unlock(&data->mutex);3105return 0;3106}31073108/* write_ptr position in bytes rather then DW */3109write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);3110wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);31113112if (data->prev_wrap_cnt == wrap_cnt) {3113size = write_ptr - data->prev_wr_ptr;3114curr_buf = cpu_addr + data->prev_wr_ptr;3115b_full = iwl_write_to_user_buf(user_buf, count,3116curr_buf, &size,3117&bytes_copied);3118data->prev_wr_ptr += size;31193120} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&3121write_ptr < data->prev_wr_ptr) {3122size = trans->dbg.fw_mon.size - data->prev_wr_ptr;3123curr_buf = cpu_addr + data->prev_wr_ptr;3124b_full = iwl_write_to_user_buf(user_buf, count,3125curr_buf, &size,3126&bytes_copied);3127data->prev_wr_ptr += size;31283129if (!b_full) {3130size = write_ptr;3131b_full = iwl_write_to_user_buf(user_buf, 
count,3132cpu_addr, &size,3133&bytes_copied);3134data->prev_wr_ptr = size;3135data->prev_wrap_cnt++;3136}3137} else {3138if (data->prev_wrap_cnt == wrap_cnt - 1 &&3139write_ptr > data->prev_wr_ptr)3140IWL_WARN(trans,3141"write pointer passed previous write pointer, start copying from the beginning\n");3142else if (!unlikely(data->prev_wrap_cnt == 0 &&3143data->prev_wr_ptr == 0))3144IWL_WARN(trans,3145"monitor data is out of sync, start copying from the beginning\n");31463147size = write_ptr;3148b_full = iwl_write_to_user_buf(user_buf, count,3149cpu_addr, &size,3150&bytes_copied);3151data->prev_wr_ptr = size;3152data->prev_wrap_cnt = wrap_cnt;3153}31543155mutex_unlock(&data->mutex);31563157return bytes_copied;3158}31593160static ssize_t iwl_dbgfs_rf_read(struct file *file,3161char __user *user_buf,3162size_t count, loff_t *ppos)3163{3164struct iwl_trans *trans = file->private_data;3165struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);31663167if (!trans_pcie->rf_name[0])3168return -ENODEV;31693170return simple_read_from_buffer(user_buf, count, ppos,3171trans_pcie->rf_name,3172strlen(trans_pcie->rf_name));3173}31743175static ssize_t iwl_dbgfs_reset_write(struct file *file,3176const char __user *user_buf,3177size_t count, loff_t *ppos)3178{3179struct iwl_trans *trans = file->private_data;3180static const char * const modes[] = {3181[IWL_RESET_MODE_SW_RESET] = "sw",3182[IWL_RESET_MODE_REPROBE] = "reprobe",3183[IWL_RESET_MODE_TOP_RESET] = "top",3184[IWL_RESET_MODE_REMOVE_ONLY] = "remove",3185[IWL_RESET_MODE_RESCAN] = "rescan",3186[IWL_RESET_MODE_FUNC_RESET] = "function",3187[IWL_RESET_MODE_PROD_RESET] = "product",3188};3189char buf[10] = {};3190int mode;31913192if (count > sizeof(buf) - 1)3193return -EINVAL;31943195if (copy_from_user(buf, user_buf, count))3196return -EFAULT;31973198mode = sysfs_match_string(modes, buf);3199if (mode < 0)3200return mode;32013202if (mode < IWL_RESET_MODE_REMOVE_ONLY) {3203if (!test_bit(STATUS_DEVICE_ENABLED, 
&trans->status))3204return -EINVAL;3205if (mode == IWL_RESET_MODE_TOP_RESET) {3206if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)3207return -EINVAL;3208trans->request_top_reset = 1;3209}3210iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS);3211iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS);3212return count;3213}32143215iwl_trans_pcie_reset(trans, mode);32163217return count;3218}32193220DEBUGFS_READ_WRITE_FILE_OPS(interrupt);3221DEBUGFS_READ_FILE_OPS(fh_reg);3222DEBUGFS_READ_FILE_OPS(rx_queue);3223DEBUGFS_WRITE_FILE_OPS(csr);3224DEBUGFS_READ_WRITE_FILE_OPS(rfkill);3225DEBUGFS_READ_FILE_OPS(rf);3226DEBUGFS_WRITE_FILE_OPS(reset);32273228static const struct file_operations iwl_dbgfs_tx_queue_ops = {3229.owner = THIS_MODULE,3230.open = iwl_dbgfs_tx_queue_open,3231.read = seq_read,3232.llseek = seq_lseek,3233.release = seq_release_private,3234};32353236static const struct file_operations iwl_dbgfs_monitor_data_ops = {3237.read = iwl_dbgfs_monitor_data_read,3238.open = iwl_dbgfs_monitor_data_open,3239.release = iwl_dbgfs_monitor_data_release,3240};32413242/* Create the debugfs files and directories */3243void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)3244{3245struct dentry *dir = trans->dbgfs_dir;32463247DEBUGFS_ADD_FILE(rx_queue, dir, 0400);3248DEBUGFS_ADD_FILE(tx_queue, dir, 0400);3249DEBUGFS_ADD_FILE(interrupt, dir, 0600);3250DEBUGFS_ADD_FILE(csr, dir, 0200);3251DEBUGFS_ADD_FILE(fh_reg, dir, 0400);3252DEBUGFS_ADD_FILE(rfkill, dir, 0600);3253DEBUGFS_ADD_FILE(monitor_data, dir, 0400);3254DEBUGFS_ADD_FILE(rf, dir, 0400);3255DEBUGFS_ADD_FILE(reset, dir, 0200);3256}32573258void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)3259{3260struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);3261struct cont_rec *data = &trans_pcie->fw_mon_data;32623263mutex_lock(&data->mutex);3264data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;3265mutex_unlock(&data->mutex);3266}3267#endif /*CONFIG_IWLWIFI_DEBUGFS */32683269static u32 
iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)3270{3271struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);3272u32 cmdlen = 0;3273int i;32743275for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)3276cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);32773278return cmdlen;3279}32803281static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,3282struct iwl_fw_error_dump_data **data,3283int allocated_rb_nums)3284{3285struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);3286int max_len = trans_pcie->rx_buf_bytes;3287/* Dump RBs is supported only for pre-9000 devices (1 queue) */3288struct iwl_rxq *rxq = &trans_pcie->rxq[0];3289u32 i, r, j, rb_len = 0;32903291spin_lock_bh(&rxq->lock);32923293r = iwl_get_closed_rb_stts(trans, rxq);32943295for (i = rxq->read, j = 0;3296i != r && j < allocated_rb_nums;3297i = (i + 1) & RX_QUEUE_MASK, j++) {3298struct iwl_rx_mem_buffer *rxb = rxq->queue[i];3299struct iwl_fw_error_dump_rb *rb;33003301dma_sync_single_for_cpu(trans->dev, rxb->page_dma,3302max_len, DMA_FROM_DEVICE);33033304rb_len += sizeof(**data) + sizeof(*rb) + max_len;33053306(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);3307(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);3308rb = (void *)(*data)->data;3309rb->index = cpu_to_le32(i);3310memcpy(rb->data, page_address(rxb->page), max_len);33113312*data = iwl_fw_error_next_data(*data);3313}33143315spin_unlock_bh(&rxq->lock);33163317return rb_len;3318}3319#define IWL_CSR_TO_DUMP (0x250)33203321static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,3322struct iwl_fw_error_dump_data **data)3323{3324u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;3325__le32 *val;3326int i;33273328(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);3329(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);3330val = (void *)(*data)->data;33313332for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)3333*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));33343335*data = iwl_fw_error_next_data(*data);33363337return 
csr_len;3338}33393340static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,3341struct iwl_fw_error_dump_data **data)3342{3343u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;3344__le32 *val;3345int i;33463347if (!iwl_trans_grab_nic_access(trans))3348return 0;33493350(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);3351(*data)->len = cpu_to_le32(fh_regs_len);3352val = (void *)(*data)->data;33533354if (!trans->mac_cfg->gen2)3355for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;3356i += sizeof(u32))3357*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));3358else3359for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);3360i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);3361i += sizeof(u32))3362*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,3363i));33643365iwl_trans_release_nic_access(trans);33663367*data = iwl_fw_error_next_data(*data);33683369return sizeof(**data) + fh_regs_len;3370}33713372static u323373iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,3374struct iwl_fw_error_dump_fw_mon *fw_mon_data,3375u32 monitor_len)3376{3377u32 buf_size_in_dwords = (monitor_len >> 2);3378u32 *buffer = (u32 *)fw_mon_data->data;3379u32 i;33803381if (!iwl_trans_grab_nic_access(trans))3382return 0;33833384iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);3385for (i = 0; i < buf_size_in_dwords; i++)3386buffer[i] = iwl_read_umac_prph_no_grab(trans,3387MON_DMARB_RD_DATA_ADDR);3388iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);33893390iwl_trans_release_nic_access(trans);33913392return monitor_len;3393}33943395static void3396iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,3397struct iwl_fw_error_dump_fw_mon *fw_mon_data)3398{3399u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;34003401if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {3402base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;3403base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;3404write_ptr = DBGC_CUR_DBGBUF_STATUS;3405wrap_cnt = 
DBGC_DBGBUF_WRAP_AROUND;3406} else if (trans->dbg.dest_tlv) {3407write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);3408wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);3409base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);3410} else {3411base = MON_BUFF_BASE_ADDR;3412write_ptr = MON_BUFF_WRPTR;3413wrap_cnt = MON_BUFF_CYCLE_CNT;3414}34153416write_ptr_val = iwl_read_prph(trans, write_ptr);3417fw_mon_data->fw_mon_cycle_cnt =3418cpu_to_le32(iwl_read_prph(trans, wrap_cnt));3419fw_mon_data->fw_mon_base_ptr =3420cpu_to_le32(iwl_read_prph(trans, base));3421if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {3422fw_mon_data->fw_mon_base_high_ptr =3423cpu_to_le32(iwl_read_prph(trans, base_high));3424write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;3425/* convert wrtPtr to DWs, to align with all HWs */3426write_ptr_val >>= 2;3427}3428fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);3429}34303431static u323432iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,3433struct iwl_fw_error_dump_data **data,3434u32 monitor_len)3435{3436struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;3437u32 len = 0;34383439if (trans->dbg.dest_tlv ||3440(fw_mon->size &&3441(trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||3442trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {3443struct iwl_fw_error_dump_fw_mon *fw_mon_data;34443445(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);3446fw_mon_data = (void *)(*data)->data;34473448iwl_trans_pcie_dump_pointers(trans, fw_mon_data);34493450len += sizeof(**data) + sizeof(*fw_mon_data);3451if (fw_mon->size) {3452memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);3453monitor_len = fw_mon->size;3454} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {3455u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);3456/*3457* Update pointers to reflect actual values after3458* shifting3459*/3460if (trans->dbg.dest_tlv->version) {3461base = (iwl_read_prph(trans, base) 
&
					IWL_LDBG_M2S_BUF_BA_MSK) <<
				       trans->dbg.dest_tlv->base_shift;
				base *= IWL_M2S_UNIT_SIZE;
				base += trans->mac_cfg->base->smem_offset;
			} else {
				base = iwl_read_prph(trans, base) <<
				       trans->dbg.dest_tlv->base_shift;
			}

			/* monitor is in SMEM - read it out through the mem API */
			iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data,
						monitor_len / sizeof(u32));
		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}

/*
 * Account for the firmware monitor section in *len and return the
 * monitor payload length in bytes (0 when no monitor is configured).
 * The payload size comes either from the DRAM buffer we allocated or
 * from the dest TLV base/end (or base/size, version 1) registers.
 */
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
	if (trans->dbg.fw_mon.size) {
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans->dbg.fw_mon.size;
		return trans->dbg.fw_mon.size;
	} else if (trans->dbg.dest_tlv) {
		u32 base, end, cfg_reg, monitor_len;

		if (trans->dbg.dest_tlv->version == 1) {
			/* v1: one register carries both base and size fields */
			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			cfg_reg = iwl_read_prph(trans, cfg_reg);
			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
				trans->dbg.dest_tlv->base_shift;
			base *= IWL_M2S_UNIT_SIZE;
			base += trans->mac_cfg->base->smem_offset;

			monitor_len =
				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
				trans->dbg.dest_tlv->end_shift;
			monitor_len *= IWL_M2S_UNIT_SIZE;
		} else {
			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);

			base = iwl_read_prph(trans, base) <<
			       trans->dbg.dest_tlv->base_shift;
			end = iwl_read_prph(trans, end) <<
			      trans->dbg.dest_tlv->end_shift;

			/* Make "end" point to the actual end */
			if (trans->mac_cfg->device_family >=
			    IWL_DEVICE_FAMILY_8000 ||
			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
				end += (1 << trans->dbg.dest_tlv->end_shift);
			monitor_len = end - base;
		}
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
		return monitor_len;
	}
	return 0;
}

/*
 * Build a complete firmware error-dump blob according to dump_mask:
 * host commands, FW monitor, CSR/FH registers, RBs (pre-9000 only) and
 * gen2 paging. Returns a vzalloc'ed iwl_trans_dump_data (caller frees)
 * or NULL on empty mask / allocation failure.
 */
struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
			 const struct iwl_dump_sanitize_ops *sanitize_ops,
			 void *sanitize_ctx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0, monitor_len = 0;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->mac_cfg->mq_rx_supported &&
			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);

	if (!dump_mask)
		return NULL;

	/* first pass: compute the total allocation size */
	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
		len += sizeof(*data) +
			cmdq->n_window * (sizeof(*txcmd) +
					  TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);

	/* CSR registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
		if (trans->mac_cfg->gen2)
			len += sizeof(*data) +
			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
		else
			len += sizeof(*data) +
			       (FH_MEM_UPPER_BOUND -
				FH_MEM_LOWER_BOUND);
	}

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		spin_lock_bh(&rxq->lock);
		num_rbs = iwl_get_closed_rb_stts(trans, rxq);
		num_rbs = (num_rbs - rxq->read) &
RX_QUEUE_MASK;
		spin_unlock_bh(&rxq->lock);

		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* Paged memory for gen2 HW */
	if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
		for (i = 0; i < trans->init_dram.paging_cnt; i++)
			len += sizeof(*data) +
			       sizeof(struct iwl_fw_error_dump_paging) +
			       trans->init_dram.paging[i].size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	/* second pass: fill the blob; len now tracks bytes actually used */
	len = 0;
	data = (void *)dump_data->data;

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
		u16 tfd_size = trans_pcie->txqs.tfd.size;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
		txcmd = (void *)data->data;
		spin_lock_bh(&cmdq->lock);
		/* walk the command queue backwards from the write pointer,
		 * capping each captured payload at TFD_MAX_PAYLOAD_SIZE
		 */
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

			if (trans->mac_cfg->gen2)
				tfdidx = idx;
			else
				tfdidx = ptr;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   (u8 *)cmdq->tfds +
							   tfd_size * tfdidx);
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				/* let the caller scrub sensitive host cmds */
				if (sanitize_ops && sanitize_ops->frob_hcmd)
					sanitize_ops->frob_hcmd(sanitize_ctx,
								txcmd->data,
								caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->mac_cfg->gen2 &&
	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

/* Thin wrapper to enable/disable device interrupts from common code */
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_enable_interrupts(trans);
	else
		iwl_disable_interrupts(trans);
}

/*
 * Synchronize an NMI with the device: pick the interrupt-cause register
 * and SW-error bit matching the interrupt mode (MSI-X vs. legacy) and
 * device family, then delegate to the common sync helper.
 */
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	u32 inta_addr, sw_err_bit;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
		else
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}

/*
 * Select the device TX command buffer size and alignment for this MAC
 * family. Returns 0, or -EINVAL if a gen2 command could cross its
 * alignment (page-boundary) unit.
 */
static int iwl_trans_pcie_set_txcmd_info(const struct iwl_mac_cfg *mac_cfg,
					 unsigned int *txcmd_size,
					 unsigned int *txcmd_align)
{
	if (!mac_cfg->gen2) {
		*txcmd_size = sizeof(struct iwl_tx_cmd_v6);
		*txcmd_align = sizeof(void *);
	} else if (mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		*txcmd_size = sizeof(struct iwl_tx_cmd_v9);
		*txcmd_align = 64;
	} else {
		*txcmd_size = sizeof(struct
iwl_tx_cmd);
		*txcmd_align = 128;
	}

	*txcmd_size += sizeof(struct iwl_cmd_header);
	*txcmd_size += 36; /* biggest possible 802.11 header */

	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
	if (WARN_ON((mac_cfg->gen2 && *txcmd_size >= *txcmd_align)))
		return -EINVAL;

	return 0;
}

/*
 * Allocate and initialize the PCIe transport: the iwl_trans itself,
 * TFD geometry, DMA masks/pools, NAPI dummy netdev, locks/waitqueues,
 * the RB-allocator workqueue and the interrupt handlers (MSI-X or
 * legacy ICT). Returns the new transport or an ERR_PTR; on error all
 * partially-acquired resources are unwound via the goto ladder.
 */
static struct iwl_trans *
iwl_trans_pcie_alloc(struct pci_dev *pdev,
		     const struct iwl_mac_cfg *mac_cfg,
		     struct iwl_trans_info *info, u8 __iomem *hw_base)
{
	struct iwl_trans_pcie *trans_pcie, **priv;
	unsigned int txcmd_size, txcmd_align;
	struct iwl_trans *trans;
	unsigned int bc_tbl_n_entries;
	int ret, addr_size;

	ret = iwl_trans_pcie_set_txcmd_info(mac_cfg, &txcmd_size,
					    &txcmd_align);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
				mac_cfg, txcmd_size, txcmd_align);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->hw_base = hw_base;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	/* TFD layout differs between gen2 (TFH) and older devices */
	if (trans->mac_cfg->gen2) {
		trans_pcie->txqs.tfd.addr_size = 64;
		trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
	} else {
		trans_pcie->txqs.tfd.addr_size = 36;
		trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
	}

	trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);

	info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);

#ifdef CONFIG_INET
	trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->txqs.tso_hdr_page) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
#endif

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
	else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
	else
		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;

	trans_pcie->txqs.bc_tbl_size =
		sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
	/*
	 * For gen2 devices, we use a single allocation for each byte-count
	 * table, but they're pretty small (1k) so use a DMA pool that we
	 * allocate here.
	 */
	if (trans->mac_cfg->gen2) {
		trans_pcie->txqs.bc_pool =
			dmam_pool_create("iwlwifi:bc", trans->dev,
					 trans_pcie->txqs.bc_tbl_size,
					 256, 0);
		if (!trans_pcie->txqs.bc_pool) {
			ret = -ENOMEM;
			goto out_free_tso;
		}
	}

	/* Some things must not change even if the config does */
	WARN_ON(trans_pcie->txqs.tfd.addr_size !=
		(trans->mac_cfg->gen2 ? 64 : 36));

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 */
	trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
	if (!trans_pcie->napi_dev) {
		ret = -ENOMEM;
		goto out_free_tso;
	}
	/* The private struct in netdev is a pointer to struct iwl_trans_pcie */
	priv = netdev_priv(trans_pcie->napi_dev);
	*priv = trans_pcie;

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
	init_waitqueue_head(&trans_pcie->imr_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_ndev;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!mac_cfg->base->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	/* try the TFD address width first, fall back to 32-bit DMA */
	addr_size = trans_pcie->txqs.tfd.addr_size;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		info->hw_rev_step = info->hw_rev & 0xF;
	else
		info->hw_rev_step = (info->hw_rev & 0xC) >> 2;

	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
	if (ret)
		goto out_no_pci;

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);
		if (ret)
			goto out_no_pci;
	} else {
		/* legacy interrupts need the ICT table */
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_ndev:
	free_netdev(trans_pcie->napi_dev);
out_free_tso:
#ifdef CONFIG_INET
	free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
#endif
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}

/*
 * Program the IMR service-DMA channel to copy byte_cnt bytes from host
 * DRAM (src_addr) into device SRAM (dst_addr), halting the UMAC first
 * and kicking off the transfer in the final control write.
 */
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	iwl_write_prph(trans, IMR_UREG_CHICK,
		       iwl_read_prph(trans, IMR_UREG_CHICK) |
		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
		       (u32)(src_addr & 0xFFFFFFFF));
	iwl_write_prph(trans,
IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
		       iwl_get_dma_hi_addr(src_addr));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
}

/*
 * Start an IMR DRAM->SRAM copy and wait (up to 5s) for the interrupt
 * handler to move imr_status out of IMR_D2S_REQUESTED. Returns 0 on
 * success, -ETIMEDOUT on timeout or device-reported error (registers
 * are dumped for diagnosis in that case).
 */
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = -1;

	trans_pcie->imr_status = IMR_D2S_REQUESTED;
	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status !=
				 IMR_D2S_REQUESTED, 5 * HZ);
	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
		iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
		return -ETIMEDOUT;
	}
	trans_pcie->imr_status = IMR_D2S_IDLE;
	return 0;
}

/*
 * Read rf id and cdb info from prph register and store it
 */
static void get_crf_id(struct iwl_trans *iwl_trans,
		       struct iwl_trans_info *info)
{
	u32 sd_reg_ver_addr;
	u32 hw_wfpm_id;
	u32 val = 0;
	u8 step;

	if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		sd_reg_ver_addr = SD_REG_VER_GEN2;
	else
		sd_reg_ver_addr = SD_REG_VER;

	/* Enable access to peripheral registers */
	val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
	val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK;
	iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);

	/* Read crf info */
	info->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);

	/* Read cnv info */
	info->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);

	/* For BZ-W, take B step also when A step is indicated */
	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
		step = SILICON_B_STEP;

	/* In BZ, the MAC step must be read from the CNVI aux register */
	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
		step = CNVI_AUX_MISC_CHIP_MAC_STEP(info->hw_cnv_id);

		/* For BZ-U, take B step also when A step is indicated */
		if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(info->hw_cnv_id) ==
		    CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
		    step == SILICON_A_STEP)
			step = SILICON_B_STEP;
	}

	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
	    CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
		info->hw_rev_step = step;
		info->hw_rev |= step;
	}

	/* Read cdb info (also contains the jacket info if needed in the future) */
	hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
	IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
		 info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}

/*
 * In case that there is no OTP on the NIC, map the rf id and cdb info
 * from the prph registers.
 */
static int map_crf_id(struct iwl_trans *iwl_trans,
		      struct iwl_trans_info *info)
{
	int ret = 0;
	u32 val = info->hw_crf_id;
	u32 step_id = REG_CRF_ID_STEP(val);
	u32 slave_id = REG_CRF_ID_SLAVE(val);
	u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
	u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
						    WFPM_OTP_CFG1_ADDR);
	u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
	u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);

	/* Map between crf id to rf id */
	switch (REG_CRF_ID_TYPE(val)) {
	case REG_CRF_ID_TYPE_JF_1:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
		break;
	case REG_CRF_ID_TYPE_JF_2:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
		break;
	case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
		break;
	case REG_CRF_ID_TYPE_HR_NONE_CDB:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
		break;
	case REG_CRF_ID_TYPE_HR_CDB:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
		break;
	case REG_CRF_ID_TYPE_GF:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
		break;
	case REG_CRF_ID_TYPE_FM:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
		break;
	case REG_CRF_ID_TYPE_WHP:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
		break;
	case REG_CRF_ID_TYPE_PE:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
		break;
	default:
		ret = -EIO;
		IWL_ERR(iwl_trans,
			"Can't find a correct rfid for crf id 0x%x\n",
			REG_CRF_ID_TYPE(val));
		goto out;
	}

	/* Set Step-id */
	info->hw_rf_id |= (step_id << 8);

	/* Set CDB capabilities */
	if (cdb_id_wfpm || slave_id) {
		info->hw_rf_id += BIT(28);
		IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
	}

	/* Set Jacket capabilities */
	if (jacket_id_wfpm || jacket_id_cnv) {
		info->hw_rf_id += BIT(29);
		IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
	}

	IWL_INFO(iwl_trans,
		 "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
		 REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
	IWL_INFO(iwl_trans,
		 "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
		 cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
	IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
		 jacket_id_cnv, info->hw_cnv_id);

out:
	return ret;
}

/* Delayed-work callback: re-sample CSR_HW_IF_CONFIG_REG for ME presence */
static void iwl_pcie_recheck_me_status(struct work_struct *wk)
{
	struct iwl_trans_pcie *trans_pcie = container_of(wk,
							 typeof(*trans_pcie),
							 me_recheck_wk.work);
	u32 val;

	val = iwl_read32(trans_pcie->trans, CSR_HW_IF_CONFIG_REG);
	trans_pcie->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
}

/*
 * Determine whether the CSME/ME firmware is present: me_present stays
 * -1 (unknown) on pre-BZ devices, otherwise it is set from the SCU ECO
 * or HW-IF-config registers, with a delayed recheck if ME may still be
 * initializing.
 */
static void iwl_pcie_check_me_status(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	trans_pcie->me_present = -1;

	INIT_DELAYED_WORK(&trans_pcie->me_recheck_wk,
			  iwl_pcie_recheck_me_status);

	/* we don't have a good way of determining this until BZ */
	if
(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
		return;

	val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);
	if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) {
		trans_pcie->me_present =
			!!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
		return;
	}

	val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
	if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN |
		   CSR_HW_IF_CONFIG_REG_IAMT_UP)) {
		trans_pcie->me_present = 1;
		return;
	}

	/* recheck again later, ME might still be initializing */
	schedule_delayed_work(&trans_pcie->me_recheck_wk, HZ);
}

/*
 * Common probe path for gen1/gen2 PCIe devices: allocate the transport,
 * sanity-check early NIC access, read the HW/RF/CRF identification,
 * match a device config, and start the driver (firmware request).
 * Returns 0 on success or a negative errno; the transport is freed on
 * every error path.
 */
int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent,
			 const struct iwl_mac_cfg *mac_cfg,
			 u8 __iomem *hw_base, u32 hw_rev)
{
	const struct iwl_dev_info *dev_info;
	struct iwl_trans_info info = {
		.hw_id = (pdev->device << 16) + pdev->subsystem_device,
		.hw_rev = hw_rev,
	};
	struct iwl_trans *iwl_trans;
	struct iwl_trans_pcie *trans_pcie;
	int ret;

	iwl_trans = iwl_trans_pcie_alloc(pdev, mac_cfg, &info, hw_base);
	if (IS_ERR(iwl_trans))
		return PTR_ERR(iwl_trans);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);

	iwl_trans_pcie_check_product_reset_status(pdev);
	iwl_trans_pcie_check_product_reset_mode(pdev);

	/* set the things we know so far for the grab NIC access */
	iwl_trans_set_info(iwl_trans, &info);

	/*
	 * Let's try to grab NIC access early here. Sometimes, NICs may
	 * fail to initialize, and if that happens it's better if we see
	 * issues early on (and can reprobe, per the logic inside), than
	 * first trying to load the firmware etc. and potentially only
	 * detecting any problems when the first interface is brought up.
	 */
	ret = iwl_pcie_prepare_card_hw(iwl_trans);
	if (!ret) {
		ret = iwl_finish_nic_init(iwl_trans);
		if (ret)
			goto out_free_trans;
		if (iwl_trans_grab_nic_access(iwl_trans)) {
			get_crf_id(iwl_trans, &info);
			/* all good */
			iwl_trans_release_nic_access(iwl_trans);
		} else {
			ret = -EIO;
			goto out_free_trans;
		}
	}

	info.hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);

	/*
	 * The RF_ID is set to zero in blank OTP so read version to
	 * extract the RF_ID.
	 * This is relevant only for family 9000 and up.
	 */
	if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
	    !CSR_HW_RFID_TYPE(info.hw_rf_id) && map_crf_id(iwl_trans, &info)) {
		ret = -EINVAL;
		goto out_free_trans;
	}

	IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
		 pdev->device, pdev->subsystem_device,
		 info.hw_rev, info.hw_rf_id);

	dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
					 CSR_HW_RFID_TYPE(info.hw_rf_id),
					 CSR_HW_RFID_IS_CDB(info.hw_rf_id),
					 IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
					 IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
					 !iwl_trans->mac_cfg->integrated);
	if (dev_info) {
		iwl_trans->cfg = dev_info->cfg;
		info.name = dev_info->name;
	}

#if IS_ENABLED(CONFIG_IWLMVM)

	/*
	 * special-case 7265D, it has the same PCI IDs.
	 *
	 * Note that because we already pass the cfg to the transport above,
	 * all the parameters that the transport uses must, until that is
	 * changed, be identical to the ones in the 7265D configuration.
	 */
	if (iwl_trans->cfg == &iwl7265_cfg &&
	    (info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
		iwl_trans->cfg = &iwl7265d_cfg;
#endif
	if (!iwl_trans->cfg) {
		pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
		       pdev->device, pdev->subsystem_device,
		       info.hw_rev, info.hw_rf_id);
		ret = -EINVAL;
		goto out_free_trans;
	}

	IWL_INFO(iwl_trans, "Detected %s\n", info.name);

	if (iwl_trans->mac_cfg->mq_rx_supported) {
		if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
			ret = -EINVAL;
			goto out_free_trans;
		}
		trans_pcie->num_rx_bufs = iwl_trans_get_num_rbds(iwl_trans);
	} else {
		trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
	}

	if (!iwl_trans->mac_cfg->integrated) {
		u16 link_status;

		/* record the negotiated PCIe link speed for discrete NICs */
		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);

		info.pcie_link_speed =
			u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS);
	}

	iwl_trans_set_info(iwl_trans, &info);

	pci_set_drvdata(pdev, iwl_trans);

	iwl_pcie_check_me_status(iwl_trans);

	/* try to get ownership so that we'll know if we don't own it */
	iwl_pcie_prepare_card_hw(iwl_trans);

	iwl_trans->drv = iwl_drv_start(iwl_trans);

	if (IS_ERR(iwl_trans->drv)) {
		ret = PTR_ERR(iwl_trans->drv);
		goto out_free_trans;
	}

	/* register transport layer debugfs here */
	iwl_trans_pcie_dbgfs_register(iwl_trans);

	return 0;

out_free_trans:
	iwl_trans_pcie_free(iwl_trans);
	return ret;
}