Path: drivers/net/wireless/realtek/rtw88/pci.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"
#include "mac.h"

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");

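/* Both knobs have mode 0644, so they appear under
 * /sys/module/rtw88_pci/parameters/ (assuming the usual in-tree build
 * where this file is linked into rtw88_pci.ko) and can be set at load
 * time, e.g. "modprobe rtw88_pci disable_msi=y disable_aspm=y". Note
 * that disable_msi is only consulted when the IRQ vectors are
 * allocated at probe time.
 */
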
static const u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
			      enum rtw_tx_queue_type queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
};

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

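/* Allocate the RX buffer-descriptor ring plus one skb per descriptor.
 * Each buffer's DMA address is stashed in skb->cb (see
 * rtw_pci_reset_rx_desc()), so it can be synced and unmapped later
 * without a separate bookkeeping array.
 */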
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	const struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      IMR_C2HCMD |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

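/* Program every ring's buffer-descriptor base address (DESA) and
 * length (NUM) registers and zero the software read/write pointers,
 * so the host and hardware views of the rings start in step.
 */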
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	if (!rtw_chip_wcpu_8051(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

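/* HIMR writes are serialized by hwirq_lock, so enabling/disabling
 * cannot race with rtw_pci_irq_recognized(). With @exclude_rx true,
 * IMR_ROK is left masked so the RX ring is drained by NAPI polling
 * instead of raising further interrupts.
 */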
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci, bool exclude_rx)
{
	unsigned long flags;
	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_synchronize(&rtwpci->napi);
	napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_start(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = true;
	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = false;
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	synchronize_irq(pdev->irq);
	rtw_pci_napi_stop(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;
	bool tx_empty = true;

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		goto enter_deep_ps;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* no TX DMA is allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue is rsvd page, does not have DMA interrupt
		 * H2C queue is managed by firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}
enter_deep_ps:
	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

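/* The beacon queue carries at most one reserved-page skb at a time;
 * before a new one is queued by rtw_pci_tx_write_data(), the previous
 * one is dequeued, unmapped and freed here.
 */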
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
	u32 cur_rp;
	u8 i;

	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
	 * bit dynamic, it's hard to define a reasonable fixed total timeout
	 * for the read_poll_timeout* helpers. Instead, we bound the number
	 * of polls and just use a for loop with udelay here.
	 */
	for (i = 0; i < 30; i++) {
		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
		if (cur_rp == ring->r.wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw_dbg(rtwdev, RTW_DBG_UNEXP,
			"timed out to flush pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
				   bool drop)
{
	u8 q;

	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
		/* Unnecessary to flush BCN, H2C and HI tx queues. */
		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
		    q == RTW_TX_QUEUE_HI0)
			continue;

		if (pci_queues & BIT(q))
			__pci_flush_queue(rtwdev, q, drop);
	}
}

static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 pci_queues = 0;
	u8 i;

	/* If all of the hardware queues are requested to flush,
	 * flush all of the pci queues.
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1) {
		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
	} else {
		for (i = 0; i < rtwdev->hw->queues; i++)
			if (queues & BIT(i))
				pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
	}

	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

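/* A queue is "kicked off" by publishing its software write pointer to
 * the queue's BD index register, which lets the DMA engine fetch
 * descriptors up to that index. Deep PS must be left first (unless the
 * firmware supports TX wake), since no TX DMA may run in deep power
 * save.
 */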
static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
				      enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	enum rtw_tx_queue_type queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb,
				 enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

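/* Reserved pages ride the beacon queue, which has no write-pointer
 * kick; setting BIT_PCI_BCNQ_FLAG in RTK_PCI_TXBD_BCN_WORK is used
 * instead to hand the queued frame to the hardware.
 */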
static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp, rp_idx;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	rp_idx = ring->r.rp;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		if (++rp_idx >= ring->r.len)
			rp_idx = 0;

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

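/* RX completion only schedules NAPI; the ring itself is drained by
 * rtw_pci_rx_napi() within the poll budget.
 */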
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct napi_struct *napi = &rtwpci->napi;

	napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	struct rtw_pci_rx_ring *ring;
	int count = 0;
	u32 tmp, cur_wp;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	return count;
}

static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue, u32 limit)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct napi_struct *napi = &rtwpci->napi;
	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_rp = ring->r.rp;
	u32 count, rx_done = 0;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
	count = min(count, limit);

	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
			rx_done++;
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	/* 'rp', the last position we have read, is seen as previous position
	 * of 'wp' that is used to calculate 'count' next time.
	 */
	ring->r.wp = cur_rp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

	return rx_done;
}

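/* Latch pending interrupt status under hwirq_lock. HISR bits are
 * write-1-clear, so writing the masked status back acknowledges
 * exactly the bits that are about to be handled.
 */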
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_3081(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* disable RTW PCI interrupt to avoid more interrupts before the end of
	 * thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];
	bool rx = false;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK) {
		rtw_pci_rx_isr(rtwdev);
		rx = true;
	}
	if (unlikely(irq_status[0] & IMR_C2HCMD))
		rtw_fw_c2h_cmd_isr(rtwdev);

	/* all of the jobs for this interrupt have been done */
	if (rtwpci->running)
		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

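/* The DBI accessors reach the device's PCIe link/PHY configuration
 * registers (used below for RTK_PCIE_LINK_CFG) indirectly through MAC
 * registers: the low address bits select a byte lane, encoded both as
 * a write-enable bit and as an offset into the data register, and
 * completion is detected by polling the flag byte until the hardware
 * clears it.
 */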
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value &= ~BIT_CLKREQ_N_PAD;
	else
		value |= BIT_CLKREQ_N_PAD;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

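/* link_usage is a reference count of contexts that need the link to
 * stay out of L1: it starts at 1 from probe, and NAPI polling on
 * 8821CE takes an extra reference. ASPM is only toggled on the 0<->1
 * transitions.
 */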
static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when host supports it.
	 *
	 * The ASPM mechanism should only be enabled when driver/firmware
	 * enters power save mode and there is no heavy traffic, because we
	 * have experienced interoperability issues where the link enters L1
	 * on the fly even while the driver is sustaining high throughput.
	 * This probably happens because the ASPM behavior varies slightly
	 * across SoCs.
	 */
	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
		return;

	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
	 * extra clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Though the standard PCIe configuration space could be used to set
	 * the link control register, by Realtek's design the driver must
	 * first check whether the host supports CLKREQ/ASPM before enabling
	 * the HW module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * accesses the PCIe configuration space to follow the host settings,
	 * and the other performs the actual CLKREQ/ASPM mechanisms and is
	 * disabled by default, because the host does not always support
	 * them, and wrong settings (e.g. CLKREQ# not bi-directional) could
	 * lead to losing the device if the HW misbehaves on the link.
	 *
	 * Hence the driver is designed to first check that the PCIe
	 * configuration space is synced and enabled, and only then turn on
	 * the module that actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

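/* Apply the chip's PCIe interface parameter tables: entries are
 * filtered by cut_mask against the current chip cut, a sentinel offset
 * of 0xffff terminates a table, and ip_sel routes the write through
 * MDIO (PHY, gen1/gen2 pages) or DBI.
 */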
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct pci_dev *pdev = rtwpci->pdev;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;
	int ret;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);

	/* Disable 8821ce completion timeout by default */
	if (chip->id == RTW_CHIP_TYPE_8821C) {
		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		if (ret)
			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
				ret);
	}

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
		rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, true);
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, false);
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static const struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.flush_queues = rtw_pci_flush_queues,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,
	.dynamic_rx_agg = NULL,
	.write_firmware_page = rtw_write_firmware_page,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

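/* Allocate a single IRQ vector, preferring MSI unless disable_msi was
 * set, with legacy INTx as the fallback. The handler is split into a
 * hard-IRQ half that masks HIMR and a threaded half that does the
 * actual work.
 */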
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_INTX;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
					      priv);
	int work_done = 0;

	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, false);

	while (work_done < budget) {
		u32 work_done_once;

		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
						 budget - work_done);
		if (work_done_once == 0)
			break;
		work_done += work_done_once;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If an ISR fires during polling, before napi_complete(), and
		 * no further data is received, data on the dma_ring will not
		 * be processed immediately. Check whether the dma ring is
		 * empty and perform napi_schedule accordingly.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
	}
	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, true);

	return work_done;
}

static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtwpci->netdev = alloc_netdev_dummy(0);
	if (!rtwpci->netdev)
		return -ENOMEM;

	netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
	return 0;
}

static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_stop(rtwdev);
	netif_napi_del(&rtwpci->napi);
	free_netdev(rtwpci->netdev);
}

static pci_ers_result_t rtw_pci_io_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t rtw_pci_io_slot_reset(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev = hw->priv;

	rtw_fw_recovery(rtwdev);

	return PCI_ERS_RESULT_RECOVERED;
}

static void rtw_pci_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
}

const struct pci_error_handlers rtw_pci_err_handler = {
	.error_detected = rtw_pci_io_err_detected,
	.slot_reset = rtw_pci_io_slot_reset,
	.resume = rtw_pci_io_resume,
};
EXPORT_SYMBOL(rtw_pci_err_handler);

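/* Probe order: allocate the ieee80211_hw with the driver data appended,
 * init the core, claim and map the PCI device, set up DMA rings and
 * NAPI, read chip info, configure the PCIe PHY, then register with
 * mac80211 and finally request the IRQ. Each failure path unwinds in
 * reverse.
 */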
int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	atomic_set(&rtwpci->link_usage, 1);

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_pci_napi_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup NAPI\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
		rtwpci->rx_no_aspm = true;

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	const struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);

	pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
MODULE_LICENSE("Dual BSD/GPL");