/* Path: blob/main/sys/contrib/dev/athk/ath10k/pci.c (107414 views) */
// SPDX-License-Identifier: ISC1/*2* Copyright (c) 2005-2011 Atheros Communications Inc.3* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.4* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.5* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.6*/78#if defined(__FreeBSD__)9#define LINUXKPI_PARAM_PREFIX ath10k_pci_10#endif1112#include <linux/pci.h>13#include <linux/module.h>14#include <linux/interrupt.h>15#include <linux/spinlock.h>16#include <linux/bitops.h>17#if defined(__FreeBSD__)18#include <linux/delay.h>19#include <sys/rman.h>20#endif2122#include "core.h"23#include "debug.h"24#include "coredump.h"2526#include "targaddrs.h"27#include "bmi.h"2829#include "hif.h"30#include "htc.h"3132#include "ce.h"33#include "pci.h"3435enum ath10k_pci_reset_mode {36ATH10K_PCI_RESET_AUTO = 0,37ATH10K_PCI_RESET_WARM_ONLY = 1,38};3940static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;41static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;4243module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);44MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");4546module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);47MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");4849/* how long wait to wait for target to initialise, in ms */50#define ATH10K_PCI_TARGET_WAIT 300051#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 35253/* Maximum number of bytes that can be handled atomically by54* diag read and write.55*/56#define ATH10K_DIAG_TRANSFER_LIMIT 0x50005758#define QCA99X0_PCIE_BAR0_START_REG 0x8103059#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c60#define QCA99X0_CPU_MEM_DATA_REG 0x4d0106162static const struct pci_device_id ath10k_pci_id_table[] = {63/* PCI-E QCA988X V2 (Ubiquiti branded) */64{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },6566{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */67{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 
*/68{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */69{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */70{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */71{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */72{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */73{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */74{}75};7677static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {78/* QCA988X pre 2.0 chips are not supported because they need some nasty79* hacks. ath10k doesn't have them and these devices crash horribly80* because of that.81*/82{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },83{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },8485{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },86{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },87{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },88{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },89{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },9091{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },92{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },93{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },94{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },95{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },9697{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },9899{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },100101{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },102103{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },104{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },105106{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },107};108109static void ath10k_pci_buffer_cleanup(struct ath10k *ar);110static int ath10k_pci_cold_reset(struct ath10k *ar);111static int ath10k_pci_safe_chip_reset(struct ath10k *ar);112static int ath10k_pci_init_irq(struct ath10k *ar);113static int ath10k_pci_deinit_irq(struct 
ath10k *ar);114static int ath10k_pci_request_irq(struct ath10k *ar);115static void ath10k_pci_free_irq(struct ath10k *ar);116static int ath10k_pci_bmi_wait(struct ath10k *ar,117struct ath10k_ce_pipe *tx_pipe,118struct ath10k_ce_pipe *rx_pipe,119struct bmi_xfer *xfer);120static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);121static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);122static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);123static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);124static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);125static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);126static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);127128static const struct ce_attr pci_host_ce_config_wlan[] = {129/* CE0: host->target HTC control and raw streams */130{131.flags = CE_ATTR_FLAGS,132.src_nentries = 16,133.src_sz_max = 256,134.dest_nentries = 0,135.send_cb = ath10k_pci_htc_tx_cb,136},137138/* CE1: target->host HTT + HTC control */139{140.flags = CE_ATTR_FLAGS,141.src_nentries = 0,142.src_sz_max = 2048,143.dest_nentries = 512,144.recv_cb = ath10k_pci_htt_htc_rx_cb,145},146147/* CE2: target->host WMI */148{149.flags = CE_ATTR_FLAGS,150.src_nentries = 0,151.src_sz_max = 2048,152.dest_nentries = 128,153.recv_cb = ath10k_pci_htc_rx_cb,154},155156/* CE3: host->target WMI */157{158.flags = CE_ATTR_FLAGS,159.src_nentries = 32,160.src_sz_max = 2048,161.dest_nentries = 0,162.send_cb = ath10k_pci_htc_tx_cb,163},164165/* CE4: host->target HTT */166{167.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,168.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,169.src_sz_max = 256,170.dest_nentries = 0,171.send_cb = ath10k_pci_htt_tx_cb,172},173174/* CE5: target->host HTT (HIF->HTT) */175{176.flags = CE_ATTR_FLAGS,177.src_nentries = 0,178.src_sz_max = 512,179.dest_nentries = 512,180.recv_cb = ath10k_pci_htt_rx_cb,181},182183/* CE6: target autonomous hif_memcpy */184{185.flags = 
CE_ATTR_FLAGS,186.src_nentries = 0,187.src_sz_max = 0,188.dest_nentries = 0,189},190191/* CE7: ce_diag, the Diagnostic Window */192{193.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,194.src_nentries = 2,195.src_sz_max = DIAG_TRANSFER_LIMIT,196.dest_nentries = 2,197},198199/* CE8: target->host pktlog */200{201.flags = CE_ATTR_FLAGS,202.src_nentries = 0,203.src_sz_max = 2048,204.dest_nentries = 128,205.recv_cb = ath10k_pci_pktlog_rx_cb,206},207208/* CE9 target autonomous qcache memcpy */209{210.flags = CE_ATTR_FLAGS,211.src_nentries = 0,212.src_sz_max = 0,213.dest_nentries = 0,214},215216/* CE10: target autonomous hif memcpy */217{218.flags = CE_ATTR_FLAGS,219.src_nentries = 0,220.src_sz_max = 0,221.dest_nentries = 0,222},223224/* CE11: target autonomous hif memcpy */225{226.flags = CE_ATTR_FLAGS,227.src_nentries = 0,228.src_sz_max = 0,229.dest_nentries = 0,230},231};232233/* Target firmware's Copy Engine configuration. */234static const struct ce_pipe_config pci_target_ce_config_wlan[] = {235/* CE0: host->target HTC control and raw streams */236{237.pipenum = __cpu_to_le32(0),238.pipedir = __cpu_to_le32(PIPEDIR_OUT),239.nentries = __cpu_to_le32(32),240.nbytes_max = __cpu_to_le32(256),241.flags = __cpu_to_le32(CE_ATTR_FLAGS),242.reserved = __cpu_to_le32(0),243},244245/* CE1: target->host HTT + HTC control */246{247.pipenum = __cpu_to_le32(1),248.pipedir = __cpu_to_le32(PIPEDIR_IN),249.nentries = __cpu_to_le32(32),250.nbytes_max = __cpu_to_le32(2048),251.flags = __cpu_to_le32(CE_ATTR_FLAGS),252.reserved = __cpu_to_le32(0),253},254255/* CE2: target->host WMI */256{257.pipenum = __cpu_to_le32(2),258.pipedir = __cpu_to_le32(PIPEDIR_IN),259.nentries = __cpu_to_le32(64),260.nbytes_max = __cpu_to_le32(2048),261.flags = __cpu_to_le32(CE_ATTR_FLAGS),262.reserved = __cpu_to_le32(0),263},264265/* CE3: host->target WMI */266{267.pipenum = __cpu_to_le32(3),268.pipedir = __cpu_to_le32(PIPEDIR_OUT),269.nentries = __cpu_to_le32(32),270.nbytes_max = __cpu_to_le32(2048),271.flags = 
__cpu_to_le32(CE_ATTR_FLAGS),272.reserved = __cpu_to_le32(0),273},274275/* CE4: host->target HTT */276{277.pipenum = __cpu_to_le32(4),278.pipedir = __cpu_to_le32(PIPEDIR_OUT),279.nentries = __cpu_to_le32(256),280.nbytes_max = __cpu_to_le32(256),281.flags = __cpu_to_le32(CE_ATTR_FLAGS),282.reserved = __cpu_to_le32(0),283},284285/* NB: 50% of src nentries, since tx has 2 frags */286287/* CE5: target->host HTT (HIF->HTT) */288{289.pipenum = __cpu_to_le32(5),290.pipedir = __cpu_to_le32(PIPEDIR_IN),291.nentries = __cpu_to_le32(32),292.nbytes_max = __cpu_to_le32(512),293.flags = __cpu_to_le32(CE_ATTR_FLAGS),294.reserved = __cpu_to_le32(0),295},296297/* CE6: Reserved for target autonomous hif_memcpy */298{299.pipenum = __cpu_to_le32(6),300.pipedir = __cpu_to_le32(PIPEDIR_INOUT),301.nentries = __cpu_to_le32(32),302.nbytes_max = __cpu_to_le32(4096),303.flags = __cpu_to_le32(CE_ATTR_FLAGS),304.reserved = __cpu_to_le32(0),305},306307/* CE7 used only by Host */308{309.pipenum = __cpu_to_le32(7),310.pipedir = __cpu_to_le32(PIPEDIR_INOUT),311.nentries = __cpu_to_le32(0),312.nbytes_max = __cpu_to_le32(0),313.flags = __cpu_to_le32(0),314.reserved = __cpu_to_le32(0),315},316317/* CE8 target->host packtlog */318{319.pipenum = __cpu_to_le32(8),320.pipedir = __cpu_to_le32(PIPEDIR_IN),321.nentries = __cpu_to_le32(64),322.nbytes_max = __cpu_to_le32(2048),323.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),324.reserved = __cpu_to_le32(0),325},326327/* CE9 target autonomous qcache memcpy */328{329.pipenum = __cpu_to_le32(9),330.pipedir = __cpu_to_le32(PIPEDIR_INOUT),331.nentries = __cpu_to_le32(32),332.nbytes_max = __cpu_to_le32(2048),333.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),334.reserved = __cpu_to_le32(0),335},336337/* It not necessary to send target wlan configuration for CE10 & CE11338* as these CEs are not actively used in target.339*/340};341342/*343* Map from service/endpoint to Copy Engine.344* This table is derived from the CE_PCI TABLE, above.345* It is 
passed to the Target at startup for use by firmware.346*/347static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {348{349__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),350__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */351__cpu_to_le32(3),352},353{354__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),355__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */356__cpu_to_le32(2),357},358{359__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),360__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */361__cpu_to_le32(3),362},363{364__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),365__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */366__cpu_to_le32(2),367},368{369__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),370__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */371__cpu_to_le32(3),372},373{374__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),375__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */376__cpu_to_le32(2),377},378{379__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),380__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */381__cpu_to_le32(3),382},383{384__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),385__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */386__cpu_to_le32(2),387},388{389__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),390__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */391__cpu_to_le32(3),392},393{394__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),395__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */396__cpu_to_le32(2),397},398{399__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),400__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */401__cpu_to_le32(0),402},403{404__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),405__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */406__cpu_to_le32(1),407},408{ /* not used */409__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),410__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */411__cpu_to_le32(0),412},413{ /* not used 
*/414__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),415__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */416__cpu_to_le32(1),417},418{419__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),420__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */421__cpu_to_le32(4),422},423{424__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),425__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */426__cpu_to_le32(5),427},428429/* (Additions here) */430431{ /* must be last */432__cpu_to_le32(0),433__cpu_to_le32(0),434__cpu_to_le32(0),435},436};437438static bool ath10k_pci_is_awake(struct ath10k *ar)439{440struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);441#if defined(__linux__)442u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +443RTC_STATE_ADDRESS);444#elif defined(__FreeBSD__)445u32 val = bus_read_4((struct resource *)ar_pci->mem, PCIE_LOCAL_BASE_ADDRESS +446RTC_STATE_ADDRESS);447#endif448449return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;450}451452static void __ath10k_pci_wake(struct ath10k *ar)453{454struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);455456lockdep_assert_held(&ar_pci->ps_lock);457458ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",459ar_pci->ps_wake_refcount, ar_pci->ps_awake);460461#if defined(__linux__)462iowrite32(PCIE_SOC_WAKE_V_MASK,463ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +464PCIE_SOC_WAKE_ADDRESS);465#elif defined(__FreeBSD__)466bus_write_4((struct resource *)ar_pci->mem,467PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,468PCIE_SOC_WAKE_V_MASK);469#endif470}471472static void __ath10k_pci_sleep(struct ath10k *ar)473{474struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);475476lockdep_assert_held(&ar_pci->ps_lock);477478ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",479ar_pci->ps_wake_refcount, ar_pci->ps_awake);480481#if defined(__linux__)482iowrite32(PCIE_SOC_WAKE_RESET,483ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +484PCIE_SOC_WAKE_ADDRESS);485#elif defined(__FreeBSD__)486bus_write_4((struct 
resource *)ar_pci->mem,487PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,488PCIE_SOC_WAKE_RESET);489#endif490ar_pci->ps_awake = false;491}492493static int ath10k_pci_wake_wait(struct ath10k *ar)494{495int tot_delay = 0;496int curr_delay = 5;497498while (tot_delay < PCIE_WAKE_TIMEOUT) {499if (ath10k_pci_is_awake(ar)) {500if (tot_delay > PCIE_WAKE_LATE_US)501ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",502tot_delay / 1000);503return 0;504}505506udelay(curr_delay);507tot_delay += curr_delay;508509if (curr_delay < 50)510curr_delay += 5;511}512513return -ETIMEDOUT;514}515516static int ath10k_pci_force_wake(struct ath10k *ar)517{518struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);519unsigned long flags;520int ret = 0;521522if (ar_pci->pci_ps)523return ret;524525spin_lock_irqsave(&ar_pci->ps_lock, flags);526527if (!ar_pci->ps_awake) {528#if defined(__linux__)529iowrite32(PCIE_SOC_WAKE_V_MASK,530ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +531PCIE_SOC_WAKE_ADDRESS);532#elif defined(__FreeBSD__)533bus_write_4((struct resource *)ar_pci->mem,534PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,535PCIE_SOC_WAKE_V_MASK);536#endif537538ret = ath10k_pci_wake_wait(ar);539if (ret == 0)540ar_pci->ps_awake = true;541}542543spin_unlock_irqrestore(&ar_pci->ps_lock, flags);544545return ret;546}547548static void ath10k_pci_force_sleep(struct ath10k *ar)549{550struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);551unsigned long flags;552553spin_lock_irqsave(&ar_pci->ps_lock, flags);554555#if defined(__linux__)556iowrite32(PCIE_SOC_WAKE_RESET,557ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +558PCIE_SOC_WAKE_ADDRESS);559#elif defined(__FreeBSD__)560bus_write_4((struct resource *)ar_pci->mem,561PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,562PCIE_SOC_WAKE_RESET);563#endif564ar_pci->ps_awake = false;565566spin_unlock_irqrestore(&ar_pci->ps_lock, flags);567}568569static int ath10k_pci_wake(struct ath10k *ar)570{571struct ath10k_pci *ar_pci = 
ath10k_pci_priv(ar);572unsigned long flags;573int ret = 0;574575if (ar_pci->pci_ps == 0)576return ret;577578spin_lock_irqsave(&ar_pci->ps_lock, flags);579580ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",581ar_pci->ps_wake_refcount, ar_pci->ps_awake);582583/* This function can be called very frequently. To avoid excessive584* CPU stalls for MMIO reads use a cache var to hold the device state.585*/586if (!ar_pci->ps_awake) {587__ath10k_pci_wake(ar);588589ret = ath10k_pci_wake_wait(ar);590if (ret == 0)591ar_pci->ps_awake = true;592}593594if (ret == 0) {595ar_pci->ps_wake_refcount++;596WARN_ON(ar_pci->ps_wake_refcount == 0);597}598599spin_unlock_irqrestore(&ar_pci->ps_lock, flags);600601return ret;602}603604static void ath10k_pci_sleep(struct ath10k *ar)605{606struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);607unsigned long flags;608609if (ar_pci->pci_ps == 0)610return;611612spin_lock_irqsave(&ar_pci->ps_lock, flags);613614ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",615ar_pci->ps_wake_refcount, ar_pci->ps_awake);616617if (WARN_ON(ar_pci->ps_wake_refcount == 0))618goto skip;619620ar_pci->ps_wake_refcount--;621622mod_timer(&ar_pci->ps_timer, jiffies +623msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));624625skip:626spin_unlock_irqrestore(&ar_pci->ps_lock, flags);627}628629static void ath10k_pci_ps_timer(struct timer_list *t)630{631struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer);632struct ath10k *ar = ar_pci->ar;633unsigned long flags;634635spin_lock_irqsave(&ar_pci->ps_lock, flags);636637ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",638ar_pci->ps_wake_refcount, ar_pci->ps_awake);639640if (ar_pci->ps_wake_refcount > 0)641goto skip;642643__ath10k_pci_sleep(ar);644645skip:646spin_unlock_irqrestore(&ar_pci->ps_lock, flags);647}648649static void ath10k_pci_sleep_sync(struct ath10k *ar)650{651struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);652unsigned long flags;653654if 
(ar_pci->pci_ps == 0) {655ath10k_pci_force_sleep(ar);656return;657}658659timer_delete_sync(&ar_pci->ps_timer);660661spin_lock_irqsave(&ar_pci->ps_lock, flags);662WARN_ON(ar_pci->ps_wake_refcount > 0);663__ath10k_pci_sleep(ar);664spin_unlock_irqrestore(&ar_pci->ps_lock, flags);665}666667static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)668{669struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);670int ret;671672if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {673ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",674offset, offset + sizeof(value), ar_pci->mem_len);675return;676}677678ret = ath10k_pci_wake(ar);679if (ret) {680ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",681value, offset, ret);682return;683}684685#if defined(__linux__)686iowrite32(value, ar_pci->mem + offset);687#elif defined(__FreeBSD__)688bus_write_4((struct resource *)ar_pci->mem, offset, value);689#endif690ath10k_pci_sleep(ar);691}692693static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)694{695struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);696u32 val;697int ret;698699if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {700ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",701offset, offset + sizeof(val), ar_pci->mem_len);702return 0;703}704705ret = ath10k_pci_wake(ar);706if (ret) {707ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",708offset, ret);709return 0xffffffff;710}711712#if defined(__linux__)713val = ioread32(ar_pci->mem + offset);714#elif defined(__FreeBSD__)715val = bus_read_4((struct resource *)ar_pci->mem, offset);716#endif717ath10k_pci_sleep(ar);718719return val;720}721722inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)723{724struct ath10k_ce *ce = ath10k_ce_priv(ar);725726ce->bus_ops->write32(ar, offset, value);727}728729inline u32 ath10k_pci_read32(struct ath10k *ar, u32 
offset)730{731struct ath10k_ce *ce = ath10k_ce_priv(ar);732733return ce->bus_ops->read32(ar, offset);734}735736u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)737{738return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);739}740741void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)742{743ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);744}745746u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)747{748return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);749}750751void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)752{753ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);754}755756bool ath10k_pci_irq_pending(struct ath10k *ar)757{758u32 cause;759760/* Check if the shared legacy irq is for us */761cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +762PCIE_INTR_CAUSE_ADDRESS);763if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))764return true;765766return false;767}768769void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar)770{771/* IMPORTANT: INTR_CLR register has to be set after772* INTR_ENABLE is set to 0, otherwise interrupt can not be773* really cleared.774*/775ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,7760);777ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,778PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);779780/* IMPORTANT: this extra read transaction is required to781* flush the posted write buffer.782*/783(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +784PCIE_INTR_ENABLE_ADDRESS);785}786787void ath10k_pci_enable_intx_irq(struct ath10k *ar)788{789ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +790PCIE_INTR_ENABLE_ADDRESS,791PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);792793/* IMPORTANT: this extra read transaction is required to794* flush the posted write buffer.795*/796(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +797PCIE_INTR_ENABLE_ADDRESS);798}799800static inline const char 
*ath10k_pci_get_irq_method(struct ath10k *ar)801{802struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);803804if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)805return "msi";806807return "legacy";808}809810static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)811{812struct ath10k *ar = pipe->hif_ce_state;813struct ath10k_ce *ce = ath10k_ce_priv(ar);814struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;815struct sk_buff *skb;816dma_addr_t paddr;817int ret;818819skb = dev_alloc_skb(pipe->buf_sz);820if (!skb)821return -ENOMEM;822823WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");824825paddr = dma_map_single(ar->dev, skb->data,826skb->len + skb_tailroom(skb),827DMA_FROM_DEVICE);828if (unlikely(dma_mapping_error(ar->dev, paddr))) {829ath10k_warn(ar, "failed to dma map pci rx buf\n");830dev_kfree_skb_any(skb);831return -EIO;832}833834ATH10K_SKB_RXCB(skb)->paddr = paddr;835836spin_lock_bh(&ce->ce_lock);837ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);838spin_unlock_bh(&ce->ce_lock);839if (ret) {840dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),841DMA_FROM_DEVICE);842dev_kfree_skb_any(skb);843return ret;844}845846return 0;847}848849static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)850{851struct ath10k *ar = pipe->hif_ce_state;852struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);853struct ath10k_ce *ce = ath10k_ce_priv(ar);854struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;855int ret, num;856857if (pipe->buf_sz == 0)858return;859860if (!ce_pipe->dest_ring)861return;862863spin_lock_bh(&ce->ce_lock);864num = __ath10k_ce_rx_num_free_bufs(ce_pipe);865spin_unlock_bh(&ce->ce_lock);866867while (num >= 0) {868ret = __ath10k_pci_rx_post_buf(pipe);869if (ret) {870if (ret == -ENOSPC)871break;872ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);873mod_timer(&ar_pci->rx_post_retry, jiffies +874ATH10K_PCI_RX_POST_RETRY_MS);875break;876}877num--;878}879}880881void ath10k_pci_rx_post(struct ath10k *ar)882{883struct ath10k_pci *ar_pci = 
ath10k_pci_priv(ar);884int i;885886for (i = 0; i < CE_COUNT; i++)887ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);888}889890void ath10k_pci_rx_replenish_retry(struct timer_list *t)891{892struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t,893rx_post_retry);894struct ath10k *ar = ar_pci->ar;895896ath10k_pci_rx_post(ar);897}898899static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)900{901u32 val = 0, region = addr & 0xfffff;902903val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)904& 0x7ff) << 21;905val |= 0x100000 | region;906return val;907}908909/* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr.910* Support to access target space below 1M for qca6174 and qca9377.911* If target space is below 1M, the bit[20] of converted CE addr is 0.912* Otherwise bit[20] of converted CE addr is 1.913*/914static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)915{916u32 val = 0, region = addr & 0xfffff;917918val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)919& 0x7ff) << 21;920val |= ((addr >= 0x100000) ? 
0x100000 : 0) | region;921return val;922}923924static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)925{926u32 val = 0, region = addr & 0xfffff;927928val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);929val |= 0x100000 | region;930return val;931}932933static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)934{935struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);936937if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))938return -EOPNOTSUPP;939940return ar_pci->targ_cpu_to_ce_addr(ar, addr);941}942943/*944* Diagnostic read/write access is provided for startup/config/debug usage.945* Caller must guarantee proper alignment, when applicable, and single user946* at any moment.947*/948#if defined(__linux__)949static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,950#elif defined(__FreeBSD__)951static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, u8 *data,952#endif953int nbytes)954{955struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);956int ret = 0;957u32 *buf;958unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;959struct ath10k_ce_pipe *ce_diag;960/* Host buffer address in CE space */961u32 ce_data;962dma_addr_t ce_data_base = 0;963void *data_buf;964int i;965966mutex_lock(&ar_pci->ce_diag_mutex);967ce_diag = ar_pci->ce_diag;968969/*970* Allocate a temporary bounce buffer to hold caller's data971* to be DMA'ed from Target. 
This guarantees972* 1) 4-byte alignment973* 2) Buffer in DMA-able space974*/975alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);976977data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,978GFP_ATOMIC);979if (!data_buf) {980ret = -ENOMEM;981goto done;982}983984/* The address supplied by the caller is in the985* Target CPU virtual address space.986*987* In order to use this address with the diagnostic CE,988* convert it from Target CPU virtual address space989* to CE address space990*/991address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);992993remaining_bytes = nbytes;994ce_data = ce_data_base;995while (remaining_bytes) {996nbytes = min_t(unsigned int, remaining_bytes,997DIAG_TRANSFER_LIMIT);998999ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);1000if (ret != 0)1001goto done;10021003/* Request CE to send from Target(!) address to Host buffer */1004ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);1005if (ret)1006goto done;10071008i = 0;1009while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {1010udelay(DIAG_ACCESS_CE_WAIT_US);1011i += DIAG_ACCESS_CE_WAIT_US;10121013if (i > DIAG_ACCESS_CE_TIMEOUT_US) {1014ret = -EBUSY;1015goto done;1016}1017}10181019i = 0;1020while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,1021&completed_nbytes) != 0) {1022udelay(DIAG_ACCESS_CE_WAIT_US);1023i += DIAG_ACCESS_CE_WAIT_US;10241025if (i > DIAG_ACCESS_CE_TIMEOUT_US) {1026ret = -EBUSY;1027goto done;1028}1029}10301031if (nbytes != completed_nbytes) {1032ret = -EIO;1033goto done;1034}10351036if (*buf != ce_data) {1037ret = -EIO;1038goto done;1039}10401041remaining_bytes -= nbytes;1042memcpy(data, data_buf, nbytes);10431044address += nbytes;1045data += nbytes;1046}10471048done:10491050if (data_buf)1051dma_free_coherent(ar->dev, alloc_nbytes, data_buf,1052ce_data_base);10531054mutex_unlock(&ar_pci->ce_diag_mutex);10551056return ret;1057}10581059static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 
*value)1060{1061__le32 val = 0;1062int ret;10631064#if defined(__linux__)1065ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));1066#elif defined(__FreeBSD__)1067ret = ath10k_pci_diag_read_mem(ar, address, (u8 *)&val, sizeof(val));1068#endif1069*value = __le32_to_cpu(val);10701071return ret;1072}10731074static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,1075u32 src, u32 len)1076{1077u32 host_addr, addr;1078int ret;10791080host_addr = host_interest_item_address(src);10811082ret = ath10k_pci_diag_read32(ar, host_addr, &addr);1083if (ret != 0) {1084ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",1085src, ret);1086return ret;1087}10881089ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);1090if (ret != 0) {1091ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",1092addr, len, ret);1093return ret;1094}10951096return 0;1097}10981099#define ath10k_pci_diag_read_hi(ar, dest, src, len) \1100__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)11011102int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,1103#if defined(__linux__)1104const void *data, int nbytes)1105#elif defined(__FreeBSD__)1106const void *_d, int nbytes)1107#endif1108{1109struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);1110int ret = 0;1111u32 *buf;1112unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;1113struct ath10k_ce_pipe *ce_diag;1114void *data_buf;1115dma_addr_t ce_data_base = 0;1116int i;1117#if defined(__FreeBSD__)1118const u8 *data = _d;1119#endif11201121mutex_lock(&ar_pci->ce_diag_mutex);1122ce_diag = ar_pci->ce_diag;11231124/*1125* Allocate a temporary bounce buffer to hold caller's data1126* to be DMA'ed to Target. 
This guarantees1127* 1) 4-byte alignment1128* 2) Buffer in DMA-able space1129*/1130alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);11311132data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,1133GFP_ATOMIC);1134if (!data_buf) {1135ret = -ENOMEM;1136goto done;1137}11381139/*1140* The address supplied by the caller is in the1141* Target CPU virtual address space.1142*1143* In order to use this address with the diagnostic CE,1144* convert it from1145* Target CPU virtual address space1146* to1147* CE address space1148*/1149address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);11501151remaining_bytes = nbytes;1152while (remaining_bytes) {1153/* FIXME: check cast */1154nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);11551156/* Copy caller's data to allocated DMA buf */1157memcpy(data_buf, data, nbytes);11581159/* Set up to receive directly into Target(!) address */1160ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);1161if (ret != 0)1162goto done;11631164/*1165* Request CE to send caller-supplied data that1166* was copied to bounce buffer to Target(!) 
address.1167*/1168ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);1169if (ret != 0)1170goto done;11711172i = 0;1173while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {1174udelay(DIAG_ACCESS_CE_WAIT_US);1175i += DIAG_ACCESS_CE_WAIT_US;11761177if (i > DIAG_ACCESS_CE_TIMEOUT_US) {1178ret = -EBUSY;1179goto done;1180}1181}11821183i = 0;1184while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,1185&completed_nbytes) != 0) {1186udelay(DIAG_ACCESS_CE_WAIT_US);1187i += DIAG_ACCESS_CE_WAIT_US;11881189if (i > DIAG_ACCESS_CE_TIMEOUT_US) {1190ret = -EBUSY;1191goto done;1192}1193}11941195if (nbytes != completed_nbytes) {1196ret = -EIO;1197goto done;1198}11991200if (*buf != address) {1201ret = -EIO;1202goto done;1203}12041205remaining_bytes -= nbytes;1206address += nbytes;1207data += nbytes;1208}12091210done:1211if (data_buf) {1212dma_free_coherent(ar->dev, alloc_nbytes, data_buf,1213ce_data_base);1214}12151216if (ret != 0)1217ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",1218address, ret);12191220mutex_unlock(&ar_pci->ce_diag_mutex);12211222return ret;1223}12241225static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)1226{1227__le32 val = __cpu_to_le32(value);12281229return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));1230}12311232/* Called by lower (CE) layer when a send to Target completes. 
 */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* Drain all completed sends first, then run the HTC completion
	 * handlers outside the CE completion loop.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

/* Generic RX completion path: unmap each completed buffer, collect the
 * skbs, deliver them via @callback and then replenish the pipe.
 */
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

/* HTT (CE5) RX path: buffers are recycled in place (sync'ed for CPU,
 * delivered, then reset and sync'ed back for the device) instead of being
 * unmapped and re-posted.
 */
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		/* Undo whatever the callback consumed so the skb is empty
		 * again and can be handed straight back to the device.
		 */
		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/*let device gain the buffer again*/
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target.
 */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

/* Strip the HTC header and pass the payload to the HTT T2H handler. */
static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

/* Send a scatter-gather list on @pipe_id: all items but the last are queued
 * with CE_SEND_FLAG_GATHER; the final item triggers the actual transfer.
 * On failure every descriptor queued so far is reverted.
 */
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Make sure the whole scatter list fits in the source ring */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
#if defined(__linux__)
		   "pci tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);
#elif defined(__FreeBSD__)
		   "pci tx item %d paddr %pad len %d n_items %d pipe_id %u\n",
		   i, &items[i].paddr, items[i].len, n_items, pipe_id);
	/*
	 * XXX-BZ specific debug; the DELAY makes things work for one chipset.
	 * There's likely a race somewhere (here or LinuxKPI).
	 */
	if (n_items == 1 && items[i].len == 140) {
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);
		dump_stack();
		DELAY(500);
	}
#endif
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	/* Last item: no GATHER flag, this kicks off the transfer */
	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);
	return 0;

err:
	/* Roll back any descriptors queued before the failure */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

/* Read the firmware's hi_failure_state register dump area, log it and copy
 * it into @crash_data (if provided). Caller holds ar->dump_mutex.
 */
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->dump_mutex);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

/* Dump a memory region section-by-section, padding any gaps between
 * sections with ATH10K_MAGIC_NOT_COPIED. Returns the number of bytes
 * written into @buf.
 */
static int ath10k_pci_dump_memory_section(struct ath10k *ar,
					  const struct ath10k_mem_region *mem_region,
					  u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	/* fill the gap between the first register section and register
	 * start address
	 */
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;

	for (i = 0; cur_section != NULL; i++) {
		section_size = cur_section->end - cur_section->start;

		if (section_size <= 0) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		if ((i + 1) == mem_region->section_table.size) {
			/* last section */
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		/* read section to dest memory */
		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
					       buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		/* fill in the gap between this section and the next */
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;

		if (!next_section)
			/* this was the last section */
			break;

		cur_section = next_section;
	}

	return count;
}

/* Write the FW RAM config register and read it back to verify the switch
 * actually took effect.
 */
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
	u32 val;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   FW_RAM_CONFIG_ADDRESS, config);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				FW_RAM_CONFIG_ADDRESS);
	if (val != config) {
		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
			    val, config);
		return -EIO;
	}

	return 0;
}

/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
				       const struct ath10k_mem_region *region,
				       u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 base_addr, i;

#if defined(__linux__)
	base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
#elif defined(__FreeBSD__)
	base_addr = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_PCIE_BAR0_START_REG);
#endif
	base_addr += region->start;

	/* Indirect access: write the address register, then read the data
	 * register, one 32-bit word at a time.
	 */
	for (i = 0; i < region->len; i += 4) {
#if defined(__linux__)
		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
#elif defined(__FreeBSD__)
		bus_write_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_ADDR_REG, base_addr + i);
		*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_DATA_REG);
#endif
	}

	return region->len;
}

/* if an error happened returns < 0,
otherwise the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;
	int ret;

	/* conf_mutex guards ar->state; refuse to touch MMIO unless ON */
	mutex_lock(&ar->conf_mutex);
	if (ar->state != ATH10K_STATE_ON) {
		ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
		ret = -EIO;
		goto done;
	}

	for (i = 0; i < region->len; i += 4)
#if defined(__linux__)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
#elif defined(__FreeBSD__)
		*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, region->start + i);
#endif

	ret = region->len;
done:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
					  const struct ath10k_mem_region *current_region,
					  u8 *buf)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_pci_dump_memory_section(ar,
						      current_region,
						      buf,
						      current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	ret = ath10k_pci_diag_read_mem(ar,
				       current_region->start,
				       buf,
				       current_region->len);
	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

/* Walk the hw memory layout and dump every region into the coredump
 * ramdump buffer, each preceded by an ath10k_dump_ram_data_hdr.
 * Caller holds ar->dump_mutex.
 */
static void ath10k_pci_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count, shift;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	lockdep_assert_held(&ar->dump_mutex);

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* To get IRAM dump, the host driver needs to switch target
		 * ram config from DRAM to IRAM.
		 */
		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
			shift = current_region->start >> 20;

			ret = ath10k_pci_set_ram_config(ar, shift);
			if (ret) {
				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
					    current_region->name, ret);
				break;
			}
		}

		/* Reserve space for the header. */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		switch (current_region->type) {
		case ATH10K_MEM_REGION_TYPE_IOSRAM:
			count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
			break;
		case ATH10K_MEM_REGION_TYPE_IOREG:
			ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
			if (ret < 0)
				break;

			count = ret;
			break;
		default:
			ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
			if (ret < 0)
				break;

			count = ret;
			break;
		}

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			/* Note: the header remains, just with zero length. */
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}

/* Workqueue handler for firmware crashes: collects registers, CE state and
 * memory into a new coredump and kicks core recovery.
 */
static void ath10k_pci_fw_dump_work(struct work_struct *work)
{
	struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
						 dump_work);
	struct ath10k_fw_crash_data *crash_data;
	struct ath10k *ar = ar_pci->ar;
	char guid[UUID_STRING_LEN + 1];

	mutex_lock(&ar->dump_mutex);

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_crash_counter++;
	spin_unlock_bh(&ar->data_lock);

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);
	ath10k_ce_dump_registers(ar, crash_data);
	ath10k_pci_dump_memory(ar, crash_data);

	mutex_unlock(&ar->dump_mutex);

	ath10k_core_start_recovery(ar);
}

/* May be called from interrupt context; defer the dump to a workqueue. */
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	queue_work(ar->workqueue, &ar_pci->dump_work);
}

void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	timer_delete_sync(&ar_pci->rx_post_retry);
}

/* Resolve @service_id to its UL (host->target) and DL (target->host) pipe
 * numbers via the service-to-pipe map. Returns -ENOENT when either
 * direction is missing.
 */
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
		entry = &ar_pci->serv_to_pipe[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}

void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}

void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to mask irq/MSI.
		 */
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to unmask irq/MSI.
		 */
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_intx_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_intx_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_core_napi_enable(ar);

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	/* Restore the ASPM bits saved earlier in link_ctl */
	pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);

	ath10k_core_napi_sync_disable(ar);

	cancel_work_sync(&ar_pci->dump_work);

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possible corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

/* Exchange a BMI request/response with the target over the BMI CEs using
 * bounce buffers; polls for completion via ath10k_pci_bmi_wait().
 */
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		dma_addr_t unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, *resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

/* Poll both BMI pipes until the transfer completes or the BMI timeout
 * expires. Returns 0 on completion, -ETIMEDOUT otherwise.
 */
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	unsigned long started = jiffies;
	unsigned long dur;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
			ret = 0;
			goto out;
		}

#if defined(__linux__)
		schedule();
#elif defined(__FreeBSD__)
		/*
		 * Using LinuxKPI's schedule() will hang for-ever as there is
		 * no wake_up. Poll about 100 times per second until timeout.
		 */
		schedule_timeout(BMI_COMMUNICATION_TIMEOUT_HZ/300);
#endif
	}

	ret = -ETIMEDOUT;

out:
	dur = jiffies - started;
	if (dur > HZ)
		ath10k_dbg(ar, ATH10K_DBG_BMI,
			   "bmi cmd took %lu jiffies hz %d ret %d\n",
			   dur, HZ, ret);
	return ret;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}

/* Number of IRAM banks differs per device/chip revision; used for the
 * early-alloc configuration in ath10k_pci_init_config().
 */
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 9;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->get_num_banks(ar);
}

/* Download the CE pipe configuration and the service-to-CE map to the
 * target via the diag window, disable PCIe L1, configure early IRAM
 * allocation and finally tell the target to proceed with initialization.
 */
int ath10k_pci_init_config(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					ar_pci->pipe_config,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					ar_pci->serv_to_pipe,
					sizeof(pci_target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for other feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &ar_pci->attr[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &ar_pci->pipe_config[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
}

int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

int ath10k_pci_init_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i,
ret;26302631for (i = 0; i < CE_COUNT; i++) {2632ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);2633if (ret) {2634ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",2635i, ret);2636return ret;2637}2638}26392640return 0;2641}26422643static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)2644{2645return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &2646FW_IND_EVENT_PENDING;2647}26482649static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)2650{2651u32 val;26522653val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);2654val &= ~FW_IND_EVENT_PENDING;2655ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);2656}26572658static bool ath10k_pci_has_device_gone(struct ath10k *ar)2659{2660u32 val;26612662val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);2663return (val == 0xffffffff);2664}26652666/* this function effectively clears target memory controller assert line */2667static void ath10k_pci_warm_reset_si0(struct ath10k *ar)2668{2669u32 val;26702671val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);2672ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,2673val | SOC_RESET_CONTROL_SI0_RST_MASK);2674val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);26752676msleep(10);26772678val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);2679ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,2680val & ~SOC_RESET_CONTROL_SI0_RST_MASK);2681val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);26822683msleep(10);2684}26852686static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)2687{2688u32 val;26892690ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);26912692val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);2693ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,2694val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);2695}26962697static void ath10k_pci_warm_reset_ce(struct ath10k *ar)2698{2699u32 val;27002701val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);27022703ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,2704val | 
SOC_RESET_CONTROL_CE_RST_MASK);2705msleep(10);2706ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,2707val & ~SOC_RESET_CONTROL_CE_RST_MASK);2708}27092710static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)2711{2712u32 val;27132714val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);2715ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,2716val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);2717}27182719static int ath10k_pci_warm_reset(struct ath10k *ar)2720{2721int ret;27222723ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");27242725spin_lock_bh(&ar->data_lock);2726ar->stats.fw_warm_reset_counter++;2727spin_unlock_bh(&ar->data_lock);27282729ath10k_pci_irq_disable(ar);27302731/* Make sure the target CPU is not doing anything dangerous, e.g. if it2732* were to access copy engine while host performs copy engine reset2733* then it is possible for the device to confuse pci-e controller to2734* the point of bringing host system to a complete stop (i.e. hang).2735*/2736ath10k_pci_warm_reset_si0(ar);2737ath10k_pci_warm_reset_cpu(ar);2738ath10k_pci_init_pipes(ar);2739ath10k_pci_wait_for_target_init(ar);27402741ath10k_pci_warm_reset_clear_lf(ar);2742ath10k_pci_warm_reset_ce(ar);2743ath10k_pci_warm_reset_cpu(ar);2744ath10k_pci_init_pipes(ar);27452746ret = ath10k_pci_wait_for_target_init(ar);2747if (ret) {2748ath10k_warn(ar, "failed to wait for target init: %d\n", ret);2749return ret;2750}27512752ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");27532754return 0;2755}27562757static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)2758{2759ath10k_pci_irq_disable(ar);2760return ath10k_pci_qca99x0_chip_reset(ar);2761}27622763static int ath10k_pci_safe_chip_reset(struct ath10k *ar)2764{2765struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);27662767if (!ar_pci->pci_soft_reset)2768return -EOPNOTSUPP;27692770return ar_pci->pci_soft_reset(ar);2771}27722773static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)2774{2775int i, ret;2776u32 
val;27772778ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");27792780/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.2781* It is thus preferred to use warm reset which is safer but may not be2782* able to recover the device from all possible fail scenarios.2783*2784* Warm reset doesn't always work on first try so attempt it a few2785* times before giving up.2786*/2787for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {2788ret = ath10k_pci_warm_reset(ar);2789if (ret) {2790ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",2791i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,2792ret);2793continue;2794}27952796/* FIXME: Sometimes copy engine doesn't recover after warm2797* reset. In most cases this needs cold reset. In some of these2798* cases the device is in such a state that a cold reset may2799* lock up the host.2800*2801* Reading any host interest register via copy engine is2802* sufficient to verify if device is capable of booting2803* firmware blob.2804*/2805ret = ath10k_pci_init_pipes(ar);2806if (ret) {2807ath10k_warn(ar, "failed to init copy engine: %d\n",2808ret);2809continue;2810}28112812ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,2813&val);2814if (ret) {2815ath10k_warn(ar, "failed to poke copy engine: %d\n",2816ret);2817continue;2818}28192820ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");2821return 0;2822}28232824if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {2825ath10k_warn(ar, "refusing cold reset as requested\n");2826return -EPERM;2827}28282829ret = ath10k_pci_cold_reset(ar);2830if (ret) {2831ath10k_warn(ar, "failed to cold reset: %d\n", ret);2832return ret;2833}28342835ret = ath10k_pci_wait_for_target_init(ar);2836if (ret) {2837ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",2838ret);2839return ret;2840}28412842ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");28432844return 0;2845}28462847static int 
ath10k_pci_qca6174_chip_reset(struct ath10k *ar)2848{2849int ret;28502851ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");28522853/* FIXME: QCA6174 requires cold + warm reset to work. */28542855ret = ath10k_pci_cold_reset(ar);2856if (ret) {2857ath10k_warn(ar, "failed to cold reset: %d\n", ret);2858return ret;2859}28602861ret = ath10k_pci_wait_for_target_init(ar);2862if (ret) {2863ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",2864ret);2865return ret;2866}28672868ret = ath10k_pci_warm_reset(ar);2869if (ret) {2870ath10k_warn(ar, "failed to warm reset: %d\n", ret);2871return ret;2872}28732874ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");28752876return 0;2877}28782879static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)2880{2881int ret;28822883ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");28842885ret = ath10k_pci_cold_reset(ar);2886if (ret) {2887ath10k_warn(ar, "failed to cold reset: %d\n", ret);2888return ret;2889}28902891ret = ath10k_pci_wait_for_target_init(ar);2892if (ret) {2893ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",2894ret);2895return ret;2896}28972898ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");28992900return 0;2901}29022903static int ath10k_pci_chip_reset(struct ath10k *ar)2904{2905struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);29062907if (WARN_ON(!ar_pci->pci_hard_reset))2908return -EOPNOTSUPP;29092910return ar_pci->pci_hard_reset(ar);2911}29122913static int ath10k_pci_hif_power_up(struct ath10k *ar,2914enum ath10k_firmware_mode fw_mode)2915{2916struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);2917int ret;29182919ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");29202921pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,2922&ar_pci->link_ctl);2923pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,2924PCI_EXP_LNKCTL_ASPMC);29252926/*2927* Bring the target up cleanly.2928*2929* The target may be in an 
undefined state with an AUX-powered Target2930* and a Host in WoW mode. If the Host crashes, loses power, or is2931* restarted (without unloading the driver) then the Target is left2932* (aux) powered and running. On a subsequent driver load, the Target2933* is in an unexpected state. We try to catch that here in order to2934* reset the Target and retry the probe.2935*/2936ret = ath10k_pci_chip_reset(ar);2937if (ret) {2938if (ath10k_pci_has_fw_crashed(ar)) {2939ath10k_warn(ar, "firmware crashed during chip reset\n");2940ath10k_pci_fw_crashed_clear(ar);2941ath10k_pci_fw_crashed_dump(ar);2942}29432944ath10k_err(ar, "failed to reset chip: %d\n", ret);2945goto err_sleep;2946}29472948ret = ath10k_pci_init_pipes(ar);2949if (ret) {2950ath10k_err(ar, "failed to initialize CE: %d\n", ret);2951goto err_sleep;2952}29532954ret = ath10k_pci_init_config(ar);2955if (ret) {2956ath10k_err(ar, "failed to setup init config: %d\n", ret);2957goto err_ce;2958}29592960ret = ath10k_pci_wake_target_cpu(ar);2961if (ret) {2962ath10k_err(ar, "could not wake up target CPU: %d\n", ret);2963goto err_ce;2964}29652966return 0;29672968err_ce:2969ath10k_pci_ce_deinit(ar);29702971err_sleep:2972return ret;2973}29742975void ath10k_pci_hif_power_down(struct ath10k *ar)2976{2977ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");29782979/* Currently hif_power_up performs effectively a reset and hif_stop2980* resets the chip as well so there's no point in resetting here.2981*/2982}29832984static int ath10k_pci_hif_suspend(struct ath10k *ar)2985{2986/* Nothing to do; the important stuff is in the driver suspend. */2987return 0;2988}29892990#ifdef CONFIG_PM2991static int ath10k_pci_suspend(struct ath10k *ar)2992{2993/* The grace timer can still be counting down and ar->ps_awake be true.2994* It is known that the device may be asleep after resuming regardless2995* of the SoC powersave state before suspending. 
Hence make sure the2996* device is asleep before proceeding.2997*/2998ath10k_pci_sleep_sync(ar);29993000return 0;3001}3002#endif30033004static int ath10k_pci_hif_resume(struct ath10k *ar)3005{3006/* Nothing to do; the important stuff is in the driver resume. */3007return 0;3008}30093010#ifdef CONFIG_PM3011static int ath10k_pci_resume(struct ath10k *ar)3012{3013struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3014struct pci_dev *pdev = ar_pci->pdev;3015u32 val;3016int ret = 0;30173018ret = ath10k_pci_force_wake(ar);3019if (ret) {3020ath10k_err(ar, "failed to wake up target: %d\n", ret);3021return ret;3022}30233024/* Suspend/Resume resets the PCI configuration space, so we have to3025* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries3026* from interfering with C3 CPU state. pci_restore_state won't help3027* here since it only restores the first 64 bytes pci config header.3028*/3029pci_read_config_dword(pdev, 0x40, &val);3030if ((val & 0x0000ff00) != 0)3031pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);30323033return ret;3034}3035#endif30363037static bool ath10k_pci_validate_cal(void *data, size_t size)3038{3039__le16 *cal_words = data;3040u16 checksum = 0;3041size_t i;30423043if (size % 2 != 0)3044return false;30453046for (i = 0; i < size / 2; i++)3047checksum ^= le16_to_cpu(cal_words[i]);30483049return checksum == 0xffff;3050}30513052static void ath10k_pci_enable_eeprom(struct ath10k *ar)3053{3054/* Enable SI clock */3055ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);30563057/* Configure GPIOs for I2C operation */3058ath10k_pci_write32(ar,3059GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +30604 * QCA9887_1_0_I2C_SDA_GPIO_PIN,3061SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,3062GPIO_PIN0_CONFIG) |3063SM(1, GPIO_PIN0_PAD_PULL));30643065ath10k_pci_write32(ar,3066GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +30674 * QCA9887_1_0_SI_CLK_GPIO_PIN,3068SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |3069SM(1, 
GPIO_PIN0_PAD_PULL));30703071ath10k_pci_write32(ar,3072GPIO_BASE_ADDRESS +3073QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,30741u << QCA9887_1_0_SI_CLK_GPIO_PIN);30753076/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */3077ath10k_pci_write32(ar,3078SI_BASE_ADDRESS + SI_CONFIG_OFFSET,3079SM(1, SI_CONFIG_ERR_INT) |3080SM(1, SI_CONFIG_BIDIR_OD_DATA) |3081SM(1, SI_CONFIG_I2C) |3082SM(1, SI_CONFIG_POS_SAMPLE) |3083SM(1, SI_CONFIG_INACTIVE_DATA) |3084SM(1, SI_CONFIG_INACTIVE_CLK) |3085SM(8, SI_CONFIG_DIVIDER));3086}30873088static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)3089{3090u32 reg;3091int wait_limit;30923093/* set device select byte and for the read operation */3094reg = QCA9887_EEPROM_SELECT_READ |3095SM(addr, QCA9887_EEPROM_ADDR_LO) |3096SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);3097ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);30983099/* write transmit data, transfer length, and START bit */3100ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,3101SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |3102SM(4, SI_CS_TX_CNT));31033104/* wait max 1 sec */3105wait_limit = 100000;31063107/* wait for SI_CS_DONE_INT */3108do {3109reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);3110if (MS(reg, SI_CS_DONE_INT))3111break;31123113wait_limit--;3114udelay(10);3115} while (wait_limit > 0);31163117if (!MS(reg, SI_CS_DONE_INT)) {3118ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",3119addr);3120return -ETIMEDOUT;3121}31223123/* clear SI_CS_DONE_INT */3124ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);31253126if (MS(reg, SI_CS_DONE_ERR)) {3127ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);3128return -EIO;3129}31303131/* extract receive data */3132reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);3133*out = reg;31343135return 0;3136}31373138static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,3139size_t *data_len)3140{3141u8 *caldata = 
NULL;3142size_t calsize, i;3143int ret;31443145if (!QCA_REV_9887(ar))3146return -EOPNOTSUPP;31473148calsize = ar->hw_params.cal_data_len;3149caldata = kmalloc(calsize, GFP_KERNEL);3150if (!caldata)3151return -ENOMEM;31523153ath10k_pci_enable_eeprom(ar);31543155for (i = 0; i < calsize; i++) {3156ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);3157if (ret)3158goto err_free;3159}31603161if (!ath10k_pci_validate_cal(caldata, calsize))3162goto err_free;31633164*data = caldata;3165*data_len = calsize;31663167return 0;31683169err_free:3170kfree(caldata);31713172return -EINVAL;3173}31743175static const struct ath10k_hif_ops ath10k_pci_hif_ops = {3176.tx_sg = ath10k_pci_hif_tx_sg,3177.diag_read = ath10k_pci_hif_diag_read,3178.diag_write = ath10k_pci_diag_write_mem,3179.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,3180.start = ath10k_pci_hif_start,3181.stop = ath10k_pci_hif_stop,3182.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,3183.get_default_pipe = ath10k_pci_hif_get_default_pipe,3184.send_complete_check = ath10k_pci_hif_send_complete_check,3185.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,3186.power_up = ath10k_pci_hif_power_up,3187.power_down = ath10k_pci_hif_power_down,3188.read32 = ath10k_pci_read32,3189.write32 = ath10k_pci_write32,3190.suspend = ath10k_pci_hif_suspend,3191.resume = ath10k_pci_hif_resume,3192.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,3193};31943195/*3196* Top-level interrupt handler for all PCI interrupts from a Target.3197* When a block of MSI interrupts is allocated, this top-level handler3198* is not used; instead, we directly call the correct sub-handler.3199*/3200static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)3201{3202struct ath10k *ar = arg;3203struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3204int ret;32053206if (ath10k_pci_has_device_gone(ar))3207return IRQ_NONE;32083209ret = ath10k_pci_force_wake(ar);3210if (ret) {3211ath10k_warn(ar, "failed to wake device up on irq: %d\n", 
ret);3212return IRQ_NONE;3213}32143215if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) &&3216!ath10k_pci_irq_pending(ar))3217return IRQ_NONE;32183219ath10k_pci_disable_and_clear_intx_irq(ar);3220ath10k_pci_irq_msi_fw_mask(ar);3221napi_schedule(&ar->napi);32223223return IRQ_HANDLED;3224}32253226static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)3227{3228struct ath10k *ar = container_of(ctx, struct ath10k, napi);3229int done = 0;32303231if (ath10k_pci_has_fw_crashed(ar)) {3232ath10k_pci_fw_crashed_clear(ar);3233ath10k_pci_fw_crashed_dump(ar);3234napi_complete(ctx);3235return done;3236}32373238ath10k_ce_per_engine_service_any(ar);32393240done = ath10k_htt_txrx_compl_task(ar, budget);32413242if (done < budget) {3243napi_complete_done(ctx, done);3244/* In case of MSI, it is possible that interrupts are received3245* while NAPI poll is inprogress. So pending interrupts that are3246* received after processing all copy engine pipes by NAPI poll3247* will not be handled again. This is causing failure to3248* complete boot sequence in x86 platform. 
So before enabling3249* interrupts safer to check for pending interrupts for3250* immediate servicing.3251*/3252if (ath10k_ce_interrupt_summary(ar)) {3253napi_schedule(ctx);3254goto out;3255}3256ath10k_pci_enable_intx_irq(ar);3257ath10k_pci_irq_msi_fw_unmask(ar);3258}32593260out:3261return done;3262}32633264static int ath10k_pci_request_irq_msi(struct ath10k *ar)3265{3266struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3267int ret;32683269ret = request_irq(ar_pci->pdev->irq,3270ath10k_pci_interrupt_handler,3271IRQF_SHARED, "ath10k_pci", ar);3272if (ret) {3273ath10k_warn(ar, "failed to request MSI irq %d: %d\n",3274ar_pci->pdev->irq, ret);3275return ret;3276}32773278return 0;3279}32803281static int ath10k_pci_request_irq_intx(struct ath10k *ar)3282{3283struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3284int ret;32853286ret = request_irq(ar_pci->pdev->irq,3287ath10k_pci_interrupt_handler,3288IRQF_SHARED, "ath10k_pci", ar);3289if (ret) {3290ath10k_warn(ar, "failed to request legacy irq %d: %d\n",3291ar_pci->pdev->irq, ret);3292return ret;3293}32943295return 0;3296}32973298static int ath10k_pci_request_irq(struct ath10k *ar)3299{3300struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);33013302switch (ar_pci->oper_irq_mode) {3303case ATH10K_PCI_IRQ_INTX:3304return ath10k_pci_request_irq_intx(ar);3305case ATH10K_PCI_IRQ_MSI:3306return ath10k_pci_request_irq_msi(ar);3307default:3308return -EINVAL;3309}3310}33113312static void ath10k_pci_free_irq(struct ath10k *ar)3313{3314struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);33153316free_irq(ar_pci->pdev->irq, ar);3317}33183319void ath10k_pci_init_napi(struct ath10k *ar)3320{3321netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);3322}33233324static int ath10k_pci_init_irq(struct ath10k *ar)3325{3326struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3327int ret;33283329ath10k_pci_init_napi(ar);33303331if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)3332ath10k_info(ar, "limiting irq mode to: 
%d\n",3333ath10k_pci_irq_mode);33343335/* Try MSI */3336if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) {3337ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;3338ret = pci_enable_msi(ar_pci->pdev);3339if (ret == 0)3340return 0;33413342/* MHI failed, try legacy irq next */3343}33443345/* Try legacy irq3346*3347* A potential race occurs here: The CORE_BASE write3348* depends on target correctly decoding AXI address but3349* host won't know when target writes BAR to CORE_CTRL.3350* This write might get lost if target has NOT written BAR.3351* For now, fix the race by repeating the write in below3352* synchronization checking.3353*/3354ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;33553356ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,3357PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);33583359return 0;3360}33613362static void ath10k_pci_deinit_irq_intx(struct ath10k *ar)3363{3364ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,33650);3366}33673368static int ath10k_pci_deinit_irq(struct ath10k *ar)3369{3370struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);33713372switch (ar_pci->oper_irq_mode) {3373case ATH10K_PCI_IRQ_INTX:3374ath10k_pci_deinit_irq_intx(ar);3375break;3376default:3377pci_disable_msi(ar_pci->pdev);3378break;3379}33803381return 0;3382}33833384int ath10k_pci_wait_for_target_init(struct ath10k *ar)3385{3386struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3387unsigned long timeout;3388u32 val;33893390ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");33913392timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);33933394do {3395val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);33963397ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",3398val);33993400/* target should never return this */3401if (val == 0xffffffff)3402continue;34033404/* the device has crashed so don't bother trying anymore */3405if (val & FW_IND_EVENT_PENDING)3406break;34073408if (val & 
FW_IND_INITIALIZED)3409break;34103411if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX)3412/* Fix potential race by repeating CORE_BASE writes */3413ath10k_pci_enable_intx_irq(ar);34143415mdelay(10);3416} while (time_before(jiffies, timeout));34173418ath10k_pci_disable_and_clear_intx_irq(ar);3419ath10k_pci_irq_msi_fw_mask(ar);34203421if (val == 0xffffffff) {3422ath10k_err(ar, "failed to read device register, device is gone\n");3423return -EIO;3424}34253426if (val & FW_IND_EVENT_PENDING) {3427ath10k_warn(ar, "device has crashed during init\n");3428return -ECOMM;3429}34303431if (!(val & FW_IND_INITIALIZED)) {3432ath10k_err(ar, "failed to receive initialized event from target: %08x\n",3433val);3434return -ETIMEDOUT;3435}34363437ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");3438return 0;3439}34403441static int ath10k_pci_cold_reset(struct ath10k *ar)3442{3443u32 val;34443445ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");34463447spin_lock_bh(&ar->data_lock);34483449ar->stats.fw_cold_reset_counter++;34503451spin_unlock_bh(&ar->data_lock);34523453/* Put Target, including PCIe, into RESET. */3454val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);3455val |= 1;3456ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);34573458/* After writing into SOC_GLOBAL_RESET to put device into3459* reset and pulling out of reset pcie may not be stable3460* for any immediate pcie register access and cause bus error,3461* add delay before any pcie access request to fix this issue.3462*/3463msleep(20);34643465/* Pull Target, including PCIe, out of RESET. 
*/3466val &= ~1;3467ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);34683469msleep(20);34703471ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");34723473return 0;3474}34753476static int ath10k_pci_claim(struct ath10k *ar)3477{3478struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3479struct pci_dev *pdev = ar_pci->pdev;3480int ret;34813482pci_set_drvdata(pdev, ar);34833484ret = pci_enable_device(pdev);3485if (ret) {3486ath10k_err(ar, "failed to enable pci device: %d\n", ret);3487return ret;3488}34893490ret = pci_request_region(pdev, BAR_NUM, "ath");3491if (ret) {3492ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,3493ret);3494goto err_device;3495}34963497/* Target expects 32 bit DMA. Enforce it. */3498ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));3499if (ret) {3500ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);3501goto err_region;3502}35033504pci_set_master(pdev);35053506#if defined(__FreeBSD__)3507linuxkpi_pcim_want_to_use_bus_functions(pdev);3508#endif35093510/* Arrange for access to Target SoC registers. 
*/3511ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);3512ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);3513if (!ar_pci->mem) {3514ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);3515ret = -EIO;3516goto err_region;3517}35183519ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);3520return 0;35213522err_region:3523pci_release_region(pdev, BAR_NUM);35243525err_device:3526pci_disable_device(pdev);35273528return ret;3529}35303531static void ath10k_pci_release(struct ath10k *ar)3532{3533struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3534struct pci_dev *pdev = ar_pci->pdev;35353536pci_iounmap(pdev, ar_pci->mem);3537pci_release_region(pdev, BAR_NUM);3538pci_disable_device(pdev);3539}35403541static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)3542{3543const struct ath10k_pci_supp_chip *supp_chip;3544int i;3545u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);35463547for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {3548supp_chip = &ath10k_pci_supp_chips[i];35493550if (supp_chip->dev_id == dev_id &&3551supp_chip->rev_id == rev_id)3552return true;3553}35543555return false;3556}35573558int ath10k_pci_setup_resource(struct ath10k *ar)3559{3560struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);3561struct ath10k_ce *ce = ath10k_ce_priv(ar);3562int ret;35633564spin_lock_init(&ce->ce_lock);3565spin_lock_init(&ar_pci->ps_lock);3566mutex_init(&ar_pci->ce_diag_mutex);35673568INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);35693570timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);35713572ar_pci->attr = kmemdup(pci_host_ce_config_wlan,3573sizeof(pci_host_ce_config_wlan),3574GFP_KERNEL);3575if (!ar_pci->attr)3576return -ENOMEM;35773578ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,3579sizeof(pci_target_ce_config_wlan),3580GFP_KERNEL);3581if (!ar_pci->pipe_config) {3582ret = -ENOMEM;3583goto err_free_attr;3584}35853586ar_pci->serv_to_pipe = 
kmemdup(pci_target_service_to_ce_map_wlan,3587sizeof(pci_target_service_to_ce_map_wlan),3588GFP_KERNEL);3589if (!ar_pci->serv_to_pipe) {3590ret = -ENOMEM;3591goto err_free_pipe_config;3592}35933594if (QCA_REV_6174(ar) || QCA_REV_9377(ar))3595ath10k_pci_override_ce_config(ar);35963597ret = ath10k_pci_alloc_pipes(ar);3598if (ret) {3599ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",3600ret);3601goto err_free_serv_to_pipe;3602}36033604return 0;36053606err_free_serv_to_pipe:3607kfree(ar_pci->serv_to_pipe);3608err_free_pipe_config:3609kfree(ar_pci->pipe_config);3610err_free_attr:3611kfree(ar_pci->attr);3612return ret;3613}36143615void ath10k_pci_release_resource(struct ath10k *ar)3616{3617struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);36183619ath10k_pci_rx_retry_sync(ar);3620netif_napi_del(&ar->napi);3621ath10k_pci_ce_deinit(ar);3622ath10k_pci_free_pipes(ar);3623kfree(ar_pci->attr);3624kfree(ar_pci->pipe_config);3625kfree(ar_pci->serv_to_pipe);3626}36273628static const struct ath10k_bus_ops ath10k_pci_bus_ops = {3629.read32 = ath10k_bus_pci_read32,3630.write32 = ath10k_bus_pci_write32,3631.get_num_banks = ath10k_pci_get_num_banks,3632};36333634static int ath10k_pci_probe(struct pci_dev *pdev,3635const struct pci_device_id *pci_dev)3636{3637int ret = 0;3638struct ath10k *ar;3639struct ath10k_pci *ar_pci;3640enum ath10k_hw_rev hw_rev;3641struct ath10k_bus_params bus_params = {};3642bool pci_ps, is_qca988x = false;3643int (*pci_soft_reset)(struct ath10k *ar);3644int (*pci_hard_reset)(struct ath10k *ar);3645u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);36463647switch (pci_dev->device) {3648case QCA988X_2_0_DEVICE_ID_UBNT:3649case QCA988X_2_0_DEVICE_ID:3650hw_rev = ATH10K_HW_QCA988X;3651pci_ps = false;3652is_qca988x = true;3653pci_soft_reset = ath10k_pci_warm_reset;3654pci_hard_reset = ath10k_pci_qca988x_chip_reset;3655targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;3656break;3657case QCA9887_1_0_DEVICE_ID:3658hw_rev = 
ATH10K_HW_QCA9887;3659pci_ps = false;3660pci_soft_reset = ath10k_pci_warm_reset;3661pci_hard_reset = ath10k_pci_qca988x_chip_reset;3662targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;3663break;3664case QCA6164_2_1_DEVICE_ID:3665case QCA6174_2_1_DEVICE_ID:3666hw_rev = ATH10K_HW_QCA6174;3667pci_ps = true;3668pci_soft_reset = ath10k_pci_warm_reset;3669pci_hard_reset = ath10k_pci_qca6174_chip_reset;3670targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;3671break;3672case QCA99X0_2_0_DEVICE_ID:3673hw_rev = ATH10K_HW_QCA99X0;3674pci_ps = false;3675pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;3676pci_hard_reset = ath10k_pci_qca99x0_chip_reset;3677targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;3678break;3679case QCA9984_1_0_DEVICE_ID:3680hw_rev = ATH10K_HW_QCA9984;3681pci_ps = false;3682pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;3683pci_hard_reset = ath10k_pci_qca99x0_chip_reset;3684targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;3685break;3686case QCA9888_2_0_DEVICE_ID:3687hw_rev = ATH10K_HW_QCA9888;3688pci_ps = false;3689pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;3690pci_hard_reset = ath10k_pci_qca99x0_chip_reset;3691targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;3692break;3693case QCA9377_1_0_DEVICE_ID:3694hw_rev = ATH10K_HW_QCA9377;3695pci_ps = true;3696pci_soft_reset = ath10k_pci_warm_reset;3697pci_hard_reset = ath10k_pci_qca6174_chip_reset;3698targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;3699break;3700default:3701WARN_ON(1);3702return -EOPNOTSUPP;3703}37043705ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,3706hw_rev, &ath10k_pci_hif_ops);3707if (!ar) {3708dev_err(&pdev->dev, "failed to allocate core\n");3709return -ENOMEM;3710}37113712ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",3713pdev->vendor, pdev->device,3714pdev->subsystem_vendor, pdev->subsystem_device);37153716ar_pci = ath10k_pci_priv(ar);3717ar_pci->pdev = 
pdev;3718ar_pci->dev = &pdev->dev;3719ar_pci->ar = ar;3720ar->dev_id = pci_dev->device;3721ar_pci->pci_ps = pci_ps;3722ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;3723ar_pci->pci_soft_reset = pci_soft_reset;3724ar_pci->pci_hard_reset = pci_hard_reset;3725ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;3726ar->ce_priv = &ar_pci->ce;37273728ar->id.vendor = pdev->vendor;3729ar->id.device = pdev->device;3730ar->id.subsystem_vendor = pdev->subsystem_vendor;3731ar->id.subsystem_device = pdev->subsystem_device;37323733timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);37343735ret = ath10k_pci_setup_resource(ar);3736if (ret) {3737ath10k_err(ar, "failed to setup resource: %d\n", ret);3738goto err_core_destroy;3739}37403741ret = ath10k_pci_claim(ar);3742if (ret) {3743ath10k_err(ar, "failed to claim device: %d\n", ret);3744goto err_free_pipes;3745}37463747ret = ath10k_pci_force_wake(ar);3748if (ret) {3749ath10k_warn(ar, "failed to wake up device : %d\n", ret);3750goto err_sleep;3751}37523753ath10k_pci_ce_deinit(ar);3754ath10k_pci_irq_disable(ar);37553756ret = ath10k_pci_init_irq(ar);3757if (ret) {3758ath10k_err(ar, "failed to init irqs: %d\n", ret);3759goto err_sleep;3760}37613762ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",3763ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,3764ath10k_pci_irq_mode, ath10k_pci_reset_mode);37653766ret = ath10k_pci_request_irq(ar);3767if (ret) {3768ath10k_warn(ar, "failed to request irqs: %d\n", ret);3769goto err_deinit_irq;3770}37713772bus_params.dev_type = ATH10K_DEV_TYPE_LL;3773bus_params.link_can_suspend = true;3774/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that3775* fall off the bus during chip_reset. These chips have the same pci3776* device id as the QCA9880 BR4A or 2R4E. 
So that's why the check.3777*/3778if (is_qca988x) {3779bus_params.chip_id =3780ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);3781if (bus_params.chip_id != 0xffffffff) {3782if (!ath10k_pci_chip_is_supported(pdev->device,3783bus_params.chip_id)) {3784ret = -ENODEV;3785goto err_unsupported;3786}3787}3788}37893790ret = ath10k_pci_chip_reset(ar);3791if (ret) {3792ath10k_err(ar, "failed to reset chip: %d\n", ret);3793goto err_free_irq;3794}37953796bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);3797if (bus_params.chip_id == 0xffffffff) {3798ret = -ENODEV;3799goto err_unsupported;3800}38013802if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {3803ret = -ENODEV;3804goto err_unsupported;3805}38063807ret = ath10k_core_register(ar, &bus_params);3808if (ret) {3809ath10k_err(ar, "failed to register driver core: %d\n", ret);3810goto err_free_irq;3811}38123813return 0;38143815err_unsupported:3816ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",3817pdev->device, bus_params.chip_id);38183819err_free_irq:3820ath10k_pci_free_irq(ar);38213822err_deinit_irq:3823ath10k_pci_release_resource(ar);38243825err_sleep:3826ath10k_pci_sleep_sync(ar);3827ath10k_pci_release(ar);38283829err_free_pipes:3830ath10k_pci_free_pipes(ar);38313832err_core_destroy:3833ath10k_core_destroy(ar);38343835return ret;3836}38373838static void ath10k_pci_remove(struct pci_dev *pdev)3839{3840struct ath10k *ar = pci_get_drvdata(pdev);38413842ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");38433844if (!ar)3845return;38463847ath10k_core_unregister(ar);3848ath10k_pci_free_irq(ar);3849ath10k_pci_deinit_irq(ar);3850ath10k_pci_release_resource(ar);3851ath10k_pci_sleep_sync(ar);3852ath10k_pci_release(ar);3853ath10k_core_destroy(ar);3854}38553856MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);38573858#ifdef CONFIG_PM3859static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)3860{3861struct ath10k *ar = dev_get_drvdata(dev);3862int ret;38633864ret = 
ath10k_pci_suspend(ar);3865if (ret)3866ath10k_warn(ar, "failed to suspend hif: %d\n", ret);38673868return ret;3869}38703871static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)3872{3873struct ath10k *ar = dev_get_drvdata(dev);3874int ret;38753876ret = ath10k_pci_resume(ar);3877if (ret)3878ath10k_warn(ar, "failed to resume hif: %d\n", ret);38793880return ret;3881}38823883static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,3884ath10k_pci_pm_suspend,3885ath10k_pci_pm_resume);3886#endif38873888static struct pci_driver ath10k_pci_driver = {3889.name = "ath10k_pci",3890.id_table = ath10k_pci_id_table,3891.probe = ath10k_pci_probe,3892.remove = ath10k_pci_remove,3893#ifdef CONFIG_PM3894.driver.pm = &ath10k_pci_pm_ops,3895#endif3896#if defined(__FreeBSD__)3897.bsddriver.name = KBUILD_MODNAME,3898/* Allow a possible native driver to attach. */3899.bsd_probe_return = (BUS_PROBE_DEFAULT - 1),3900#endif3901};39023903static int __init ath10k_pci_init(void)3904{3905int ret1, ret2;39063907ret1 = pci_register_driver(&ath10k_pci_driver);3908if (ret1)3909printk(KERN_ERR "failed to register ath10k pci driver: %d\n",3910ret1);39113912ret2 = ath10k_ahb_init();3913if (ret2)3914printk(KERN_ERR "ahb init failed: %d\n", ret2);39153916if (ret1 && ret2)3917return ret1;39183919/* registered to at least one bus */3920return 0;3921}3922module_init(ath10k_pci_init);39233924static void __exit ath10k_pci_exit(void)3925{3926pci_unregister_driver(&ath10k_pci_driver);3927ath10k_ahb_exit();3928}39293930module_exit(ath10k_pci_exit);39313932MODULE_AUTHOR("Qualcomm Atheros");3933MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices");3934MODULE_LICENSE("Dual BSD/GPL");39353936/* QCA988x 2.0 firmware files */3937MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);3938MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);3939MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);3940MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" 
ATH10K_FW_API5_FILE);3941MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);3942MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);39433944/* QCA9887 1.0 firmware files */3945MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);3946MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);3947MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);39483949/* QCA6174 2.1 firmware files */3950MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);3951MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);3952MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE);3953MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);39543955/* QCA6174 3.1 firmware files */3956MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);3957MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);3958MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);3959MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);3960MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);39613962/* QCA9377 1.0 firmware files */3963MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);3964MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);3965MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);396639673968