Path: blob/main/sys/contrib/dev/mediatek/mt76/dma.h
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
 * Copyright (C) 2016 Felix Fietkau <[email protected]>
 */
#ifndef __MT76_DMA_H
#define __MT76_DMA_H

#define DMA_DUMMY_DATA			((void *)~0)

#define MT_RING_SIZE			0x10

#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
#define MT_DMA_CTL_LAST_SEC1		BIT(14)
#define MT_DMA_CTL_BURST		BIT(15)
#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0		BIT(30)
#define MT_DMA_CTL_DMA_DONE		BIT(31)
#define MT_DMA_CTL_TO_HOST		BIT(8)
#define MT_DMA_CTL_TO_HOST_A		BIT(12)
#define MT_DMA_CTL_DROP			BIT(14)
#define MT_DMA_CTL_TOKEN		GENMASK(31, 16)
#define MT_DMA_CTL_SDP1_H		GENMASK(19, 16)
#define MT_DMA_CTL_SDP0_H		GENMASK(3, 0)
#define MT_DMA_CTL_WO_DROP		BIT(8)

#define MT_DMA_PPE_CPU_REASON		GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY		GENMASK(30, 16)
#define MT_DMA_INFO_DMA_FRAG		BIT(9)
#define MT_DMA_INFO_PPE_VLD		BIT(31)

#define MT_DMA_CTL_PN_CHK_FAIL		BIT(13)
#define MT_DMA_CTL_VER_MASK		BIT(7)

#define MT_DMA_SDP0			GENMASK(15, 0)
#define MT_DMA_TOKEN_ID			GENMASK(31, 16)
#define MT_DMA_MAGIC_MASK		GENMASK(31, 28)
#define MT_DMA_RRO_EN			BIT(13)

#define MT_DMA_MAGIC_CNT		16

#define MT_DMA_WED_IND_CMD_CNT		8
#define MT_DMA_WED_IND_REASON		GENMASK(15, 12)

#define MT_DMA_HDR_LEN			4
#define MT_RX_INFO_LEN			4
#define MT_FCE_INFO_LEN			4
#define MT_RX_RXWI_LEN			32

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#elif IS_ENABLED(CONFIG_MT76_NPU)

#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val = 0;							\
	if ((_q)->flags & MT_QFLAG_NPU) {				\
		struct airoha_npu *npu;					\
									\
		rcu_read_lock();					\
		npu = rcu_dereference((_q)->dev->mmio.npu);		\
		if (npu)						\
			regmap_read(npu->regmap,			\
				    ((_q)->wed_regs + _offset), &_val);	\
		rcu_read_unlock();					\
	} else {							\
		_val = readl(&(_q)->regs->_field);			\
	}								\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_NPU) {				\
		struct airoha_npu *npu;					\
									\
		rcu_read_lock();					\
		npu = rcu_dereference((_q)->dev->mmio.npu);		\
		if (npu)						\
			regmap_write(npu->regmap,			\
				     ((_q)->wed_regs + _offset), _val);	\
		rcu_read_unlock();					\
	} else {							\
		writel(_val, &(_q)->regs->_field);			\
	}								\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

struct mt76_desc {
	__le32 buf0;
	__le32 ctrl;
	__le32 buf1;
	__le32 info;
} __packed __aligned(4);

struct mt76_wed_rro_desc {
	__le32 buf0;
	__le32 buf1;
} __packed __aligned(4);

/* data1 */
#define RRO_RXDMAD_DATA1_LS_MASK		BIT(30)
#define RRO_RXDMAD_DATA1_SDL0_MASK		GENMASK(29, 16)
/* data2 */
#define RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK	GENMASK(31, 16)
#define RRO_RXDMAD_DATA2_IND_REASON_MASK	GENMASK(15, 12)
/* data3 */
#define RRO_RXDMAD_DATA3_MAGIC_CNT_MASK		GENMASK(31, 28)
struct mt76_rro_rxdmad_c {
	__le32 data0;
	__le32 data1;
	__le32 data2;
	__le32 data3;
};

enum mt76_qsel {
	MT_QSEL_MGMT,
	MT_QSEL_HCCA,
	MT_QSEL_EDCA,
	MT_QSEL_EDCA_2,
};

enum mt76_mcu_evt_type {
	EVT_CMD_DONE,
	EVT_CMD_ERROR,
	EVT_CMD_RETRY,
	EVT_EVENT_PWR_RSP,
	EVT_EVENT_WOW_RSP,
	EVT_EVENT_CARRIER_DETECT_RSP,
	EVT_EVENT_DFS_DETECT_RSP,
};

enum mt76_dma_wed_ind_reason {
	MT_DMA_WED_IND_REASON_NORMAL,
	MT_DMA_WED_IND_REASON_REPEAT,
	MT_DMA_WED_IND_REASON_OLDPKT,
};

int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct);
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			  bool reset_idx);

static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	dev->queue_ops->reset_q(dev, q, true);
	if (mtk_wed_device_active(&dev->mmio.wed))
		mt76_wed_dma_setup(dev, q, true);
}

static inline void
mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
{
	if (!drop)
		return;

	*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
	if (!(ctrl & MT_DMA_CTL_VER_MASK))
		return;

	switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
	case MT_DMA_WED_IND_REASON_REPEAT:
		*drop = true;
		break;
	case MT_DMA_WED_IND_REASON_OLDPKT:
		*drop = !(info & MT_DMA_INFO_DMA_FRAG);
		break;
	default:
		*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
		break;
	}
}

static inline void *mt76_priv(struct net_device *dev)
{
	struct mt76_dev **priv;

	priv = netdev_priv(dev);

	return *priv;
}

#endif
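
Editor's usage sketches, not part of dma.h. First, the Q_READ/Q_WRITE accessors above exist so queue code never has to care whether a ring's registers sit in plain MMIO space or behind a WED/NPU offload block. The helper below is hypothetical; q->head, q->tail and the cpu_idx/dma_idx fields of struct mt76_queue_regs are real mt76 names.

/*
 * Hypothetical caller: publish the software head as the new CPU index
 * and read back how far DMA has progressed. Q_WRITE/Q_READ route
 * through WED or NPU register space when the queue is offloaded.
 */
static void mt76_dma_sync_idx_example(struct mt76_queue *q)
{
	Q_WRITE(q, cpu_idx, q->head);
	q->tail = Q_READ(q, dma_idx);
}

Second, the RRO_RXDMAD_* masks describe the data1..data3 words of struct mt76_rro_rxdmad_c. A plausible decode with FIELD_GET follows; the function name and out-parameters are illustrative only.

/*
 * Hypothetical decode of a completed rxdmad_c descriptor: extract the
 * buffer length, rx token and magic counter using the masks above.
 */
static void mt76_rro_rxdmad_decode_example(const struct mt76_rro_rxdmad_c *d,
					   u16 *sdl0, u16 *token, u8 *magic)
{
	u32 data1 = le32_to_cpu(d->data1);
	u32 data2 = le32_to_cpu(d->data2);
	u32 data3 = le32_to_cpu(d->data3);

	*sdl0 = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
	*token = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
	*magic = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK, data3);
}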