GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/mediatek/mt76/mt7603/dma.c
// SPDX-License-Identifier: ISC

#include "mt7603.h"
#include "mac.h"
#include "../dma.h"
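/* Map mac80211 access categories to hardware WMM queue indices */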
static const u8 wmm_queue_map[] = {
        [IEEE80211_AC_BK] = 0,
        [IEEE80211_AC_BE] = 1,
        [IEEE80211_AC_VI] = 2,
        [IEEE80211_AC_VO] = 3,
};
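/*
 * Handle a TX frame looped back on the MCU RX ring: look up the destination
 * station, pick the hardware queue from the frame type and TID, rewrite the
 * queue index in the TX descriptor and buffer the frame on the station's
 * powersave queue.
 */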
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
        static const u8 tid_to_ac[8] = {
                IEEE80211_AC_BE,
                IEEE80211_AC_BK,
                IEEE80211_AC_BK,
                IEEE80211_AC_BE,
                IEEE80211_AC_VI,
                IEEE80211_AC_VI,
                IEEE80211_AC_VO,
                IEEE80211_AC_VO
        };
        __le32 *txd = (__le32 *)skb->data;
        struct ieee80211_hdr *hdr;
        struct ieee80211_sta *sta;
        struct mt7603_sta *msta;
        struct mt76_wcid *wcid;
        u8 qid, tid = 0, hwq = 0;
        void *priv;
        int idx;
        u32 val;

        if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
                goto free;

        val = le32_to_cpu(txd[1]);
        idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
        skb->priority = FIELD_GET(MT_TXD1_TID, val);

        if (idx >= MT7603_WTBL_STA - 1)
                goto free;

        wcid = mt76_wcid_ptr(dev, idx);
        if (!wcid)
                goto free;

        priv = msta = container_of(wcid, struct mt7603_sta, wcid);

        sta = container_of(priv, struct ieee80211_sta, drv_priv);
        hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];

        /* Pick the queue mapping and hardware queue based on the frame type */
        hwq = wmm_queue_map[IEEE80211_AC_BE];
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                tid = *ieee80211_get_qos_ctl(hdr) &
                      IEEE80211_QOS_CTL_TAG1D_MASK;
                qid = tid_to_ac[tid];
                hwq = wmm_queue_map[qid];
                skb_set_queue_mapping(skb, qid);
        } else if (ieee80211_is_data(hdr->frame_control)) {
                skb_set_queue_mapping(skb, IEEE80211_AC_BE);
                hwq = wmm_queue_map[IEEE80211_AC_BE];
        } else {
                skb_pull(skb, MT_TXD_SIZE);
                if (!ieee80211_is_bufferable_mmpdu(skb))
                        goto free;
                skb_push(skb, MT_TXD_SIZE);
                skb_set_queue_mapping(skb, MT_TXQ_PSD);
                hwq = MT_TX_HW_QUEUE_MGMT;
        }

        ieee80211_sta_set_buffered(sta, tid, true);

        /* Rewrite the hardware queue index in TXD0 before requeueing */
        val = le32_to_cpu(txd[0]);
        val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
        val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq);
        txd[0] = cpu_to_le32(val);

        /* Cap the powersave queue depth; drop the oldest frame on overflow */
        spin_lock_bh(&dev->ps_lock);
        __skb_queue_tail(&msta->psq, skb);
        if (skb_queue_len(&msta->psq) >= 64) {
                skb = __skb_dequeue(&msta->psq);
                dev_kfree_skb(skb);
        }
        spin_unlock_bh(&dev->ps_lock);
        return;

free:
        dev_kfree_skb(skb);
}
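/*
 * Dispatch a received DMA buffer: MCU queue buffers are either firmware
 * events or looped-back TX frames, while main queue buffers carry TX status
 * reports, events or regular RX frames.
 */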
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
                         struct sk_buff *skb, u32 *info)
{
        struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
        __le32 *rxd = (__le32 *)skb->data;
        __le32 *end = (__le32 *)&skb->data[skb->len];
        enum rx_pkt_type type;

        type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

        if (q == MT_RXQ_MCU) {
                if (type == PKT_TYPE_RX_EVENT)
                        mt76_mcu_rx_event(&dev->mt76, skb);
                else
                        mt7603_rx_loopback_skb(dev, skb);
                return;
        }

        switch (type) {
        case PKT_TYPE_TXS:
                for (rxd++; rxd + 5 <= end; rxd += 5)
                        mt7603_mac_add_txs(dev, rxd);
                dev_kfree_skb(skb);
                break;
        case PKT_TYPE_RX_EVENT:
                mt76_mcu_rx_event(&dev->mt76, skb);
                return;
        case PKT_TYPE_NORMAL:
                if (mt7603_mac_fill_rx(dev, skb) == 0) {
                        mt76_rx(&dev->mt76, q, skb);
                        return;
                }
                fallthrough;
        default:
                dev_kfree_skb(skb);
                break;
        }
}
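/* Allocate an RX ring and enable its completion interrupt */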
static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize)
{
        int err;

        err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
                               MT_RX_RING_BASE);
        if (err < 0)
                return err;

        mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));

        return 0;
}
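/* NAPI TX completion handler: reclaim completed frames from all TX rings */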
static int mt7603_poll_tx(struct napi_struct *napi, int budget)
{
        struct mt7603_dev *dev;
        int i;

        dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
        dev->tx_dma_check = 0;

        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
        for (i = MT_TXQ_PSD; i >= 0; i--)
                mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

        if (napi_complete_done(napi, 0))
                mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);

        /*
         * Run one more cleanup pass after re-arming the TX done interrupt to
         * pick up frames that completed in the meantime.
         */
        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
        for (i = MT_TXQ_PSD; i >= 0; i--)
                mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

        mt7603_mac_sta_poll(dev);

        mt76_worker_schedule(&dev->mt76.tx_worker);

        return 0;
}
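/* Set up all TX/RX DMA rings and attach the TX completion NAPI handler */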
int mt7603_dma_init(struct mt7603_dev *dev)
{
        int ret;
        int i;

        mt76_dma_attach(&dev->mt76);

        /* Disable DMA and reset the descriptor indices before setting up the rings */
        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_TX_DMA_EN |
                   MT_WPDMA_GLO_CFG_RX_DMA_EN |
                   MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
                   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

        mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
        mt7603_pse_client_reset(dev);

        /* One TX ring per WMM access category */
        for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
                ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
                                         MT7603_TX_RING_SIZE, MT_TX_RING_BASE,
                                         NULL, 0);
                if (ret)
                        return ret;
        }

        ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
                                 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
        if (ret)
                return ret;

        ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
                                  MT_MCU_RING_SIZE, MT_TX_RING_BASE);
        if (ret)
                return ret;

        ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
                                 MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
        if (ret)
                return ret;

        ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
                                 MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
        if (ret)
                return ret;

        mt7603_irq_enable(dev,
                          MT_INT_TX_DONE(IEEE80211_AC_VO) |
                          MT_INT_TX_DONE(IEEE80211_AC_VI) |
                          MT_INT_TX_DONE(IEEE80211_AC_BE) |
                          MT_INT_TX_DONE(IEEE80211_AC_BK) |
                          MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
                          MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU) |
                          MT_INT_TX_DONE(MT_TX_HW_QUEUE_BCN) |
                          MT_INT_TX_DONE(MT_TX_HW_QUEUE_BMC));

        ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
                                   MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
        if (ret)
                return ret;

        ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
                                   MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
        if (ret)
                return ret;

        mt76_wr(dev, MT_DELAY_INT_CFG, 0);
        ret = mt76_init_queues(dev, mt76_dma_rx_poll);
        if (ret)
                return ret;

        netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
                          mt7603_poll_tx);
        napi_enable(&dev->mt76.tx_napi);

        return 0;
}
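/* Stop WPDMA and free all DMA rings */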
void mt7603_dma_cleanup(struct mt7603_dev *dev)
{
        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_TX_DMA_EN |
                   MT_WPDMA_GLO_CFG_RX_DMA_EN |
                   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

        mt76_dma_cleanup(&dev->mt76);
}