Path: sys/contrib/dev/mediatek/mt76/dma.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <[email protected]>
 */

#include <linux/dma-mapping.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#include <net/page_pool/helpers.h>
#endif
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

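/* Free every txwi still sitting in the cache: unmap its DMA buffer and
 * release the backing allocation (used on device teardown).
 */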
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

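/* Push a scatter list onto the TX ring. Each descriptor carries up to two
 * buffers; the skb and txwi are attached to the last entry so completion
 * handling can find them. Returns the index of the last descriptor used.
 */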
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

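/* Pop the buffer at the ring tail once the hardware has marked its
 * descriptor as done (or unconditionally when flushing).
 */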
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &phy->state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

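/* Refill the RX ring with fresh page-pool buffers and notify the hardware
 * of the newly added entries.
 */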
static int
mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		void *buf = NULL;
		int offset;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		qbuf.addr = page_pool_get_dma_addr(virt_to_head_page(buf)) +
			    offset + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	return frames;
}

int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int frames;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);
	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
	spin_unlock_bh(&q->lock);

	return frames;
}

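/* Set up one hardware queue: map its ring registers and allocate the
 * descriptor ring, the per-descriptor entry array and the page pool.
 */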
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

#if defined(__linux__)
	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
#elif defined(__FreeBSD__)
	q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE);
#endif
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_wed_dma_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_wed_dma_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill_buf(dev, q, false);
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
#if defined(__linux__)
		int offset = data - page_address(page) + q->buf_offset;
#elif defined(__FreeBSD__)
		int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset;
#endif

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

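/* Reap up to @budget completed RX descriptors, assemble (possibly
 * fragmented) skbs, hand them to the driver and refill the ring.
 */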
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = mt76_priv(napi->dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	struct mt76_dev **priv;
	int i;

	dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->napi_dev)
		return -ENOMEM;

	/* napi_dev private data points to mt76_dev parent, so, mt76_dev
	 * can be retrieved given napi_dev
	 */
	priv = netdev_priv(dev->napi_dev);
	*priv = dev;

	dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->tx_napi_dev) {
		free_netdev(dev->napi_dev);
		return -ENOMEM;
	}
	priv = netdev_priv(dev->tx_napi_dev);
	*priv = dev;

	snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev->threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

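/* Tear down all DMA state: flush TX/MCU queues, clean up RX queues and
 * their page pools, detach WED and free any pending txwi/rxwi entries.
 */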
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	napi_disable(&dev->tx_napi);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
	free_netdev(dev->napi_dev);
	free_netdev(dev->tx_napi_dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);