Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/mediatek/mt76/mt7925/mac.c
106465 views
1
// SPDX-License-Identifier: BSD-3-Clause-Clear
2
/* Copyright (C) 2023 MediaTek Inc. */
3
4
#include <linux/devcoredump.h>
5
#include <linux/etherdevice.h>
6
#include <linux/timekeeping.h>
7
#include "mt7925.h"
8
#include "../dma.h"
9
#include "regd.h"
10
#include "mac.h"
11
#include "mcu.h"
12
13
bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask)
{
	u32 ctrl = FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask;

	/* Select the WTBL entry and request the operation(s) in @mask */
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, ctrl);

	/* Wait up to 5000us for the hardware to clear the busy bit */
	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}
21
22
static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
23
{
24
static const u8 ac_to_tid[] = {
25
[IEEE80211_AC_BE] = 0,
26
[IEEE80211_AC_BK] = 1,
27
[IEEE80211_AC_VI] = 4,
28
[IEEE80211_AC_VO] = 6
29
};
30
struct ieee80211_sta *sta;
31
struct mt792x_sta *msta;
32
struct mt792x_link_sta *mlink;
33
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
34
LIST_HEAD(sta_poll_list);
35
struct rate_info *rate;
36
s8 rssi[4];
37
int i;
38
39
spin_lock_bh(&dev->mt76.sta_poll_lock);
40
list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
41
spin_unlock_bh(&dev->mt76.sta_poll_lock);
42
43
while (true) {
44
bool clear = false;
45
u32 addr, val;
46
u16 idx;
47
u8 bw;
48
49
if (list_empty(&sta_poll_list))
50
break;
51
mlink = list_first_entry(&sta_poll_list,
52
struct mt792x_link_sta, wcid.poll_list);
53
msta = mlink->sta;
54
spin_lock_bh(&dev->mt76.sta_poll_lock);
55
list_del_init(&mlink->wcid.poll_list);
56
spin_unlock_bh(&dev->mt76.sta_poll_lock);
57
58
idx = mlink->wcid.idx;
59
addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET);
60
61
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
62
u32 tx_last = mlink->airtime_ac[i];
63
u32 rx_last = mlink->airtime_ac[i + 4];
64
65
mlink->airtime_ac[i] = mt76_rr(dev, addr);
66
mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
67
68
tx_time[i] = mlink->airtime_ac[i] - tx_last;
69
rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
70
71
if ((tx_last | rx_last) & BIT(30))
72
clear = true;
73
74
addr += 8;
75
}
76
77
if (clear) {
78
mt7925_mac_wtbl_update(dev, idx,
79
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
80
memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
81
}
82
83
if (!mlink->wcid.sta)
84
continue;
85
86
sta = container_of((void *)msta, struct ieee80211_sta,
87
drv_priv);
88
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
89
u8 q = mt76_connac_lmac_mapping(i);
90
u32 tx_cur = tx_time[q];
91
u32 rx_cur = rx_time[q];
92
u8 tid = ac_to_tid[i];
93
94
if (!tx_cur && !rx_cur)
95
continue;
96
97
ieee80211_sta_register_airtime(sta, tid, tx_cur,
98
rx_cur);
99
}
100
101
/* We don't support reading GI info from txs packets.
102
* For accurate tx status reporting and AQL improvement,
103
* we need to make sure that flags match so polling GI
104
* from per-sta counters directly.
105
*/
106
rate = &mlink->wcid.rate;
107
108
switch (rate->bw) {
109
case RATE_INFO_BW_160:
110
bw = IEEE80211_STA_RX_BW_160;
111
break;
112
case RATE_INFO_BW_80:
113
bw = IEEE80211_STA_RX_BW_80;
114
break;
115
case RATE_INFO_BW_40:
116
bw = IEEE80211_STA_RX_BW_40;
117
break;
118
default:
119
bw = IEEE80211_STA_RX_BW_20;
120
break;
121
}
122
123
addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 6);
124
val = mt76_rr(dev, addr);
125
if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
126
addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 5);
127
val = mt76_rr(dev, addr);
128
rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
129
} else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
130
u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;
131
132
rate->he_gi = (val & (0x3 << offs)) >> offs;
133
} else if (rate->flags &
134
(RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
135
if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
136
rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
137
else
138
rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
139
}
140
141
/* get signal strength of resp frames (CTS/BA/ACK) */
142
addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 34);
143
val = mt76_rr(dev, addr);
144
145
rssi[0] = to_rssi(GENMASK(7, 0), val);
146
rssi[1] = to_rssi(GENMASK(15, 8), val);
147
rssi[2] = to_rssi(GENMASK(23, 16), val);
148
rssi[3] = to_rssi(GENMASK(31, 14), val);
149
150
mlink->ack_signal =
151
mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
152
153
ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
154
}
155
}
156
157
void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev,
158
u8 tbl_idx, u16 rate_idx)
159
{
160
u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;
161
162
mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
163
/* use wtbl spe idx */
164
mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
165
mt76_wr(dev, MT_WTBL_ITCR, ctrl);
166
}
167
168
/* The HW does not translate the mac header to 802.3 for mesh point */
/* Undo the hardware's 802.11->802.3 header translation on the first
 * fragment of a fragmented frame: rebuild an ieee80211_hdr (plus QoS/HT
 * control fields when present) in front of the payload so mac80211 can
 * reassemble the fragments itself.
 *
 * @skb:     received frame, data still starting at the RXD
 * @hdr_gap: offset from skb->data to the translated ethernet header
 *
 * Returns 0 on success, -EINVAL if the frame is not a unicast frame with
 * RXD group 4 (which carries the original frame control / seq ctrl) or
 * has no associated station context.
 */
static int mt7925_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt792x_sta *msta = (struct mt792x_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	/* group 4 holds the original frame control / sequence control */
	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	/* addr3 (and addr4 for 4-address frames) depend on the DS bits */
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	/* drop the ethernet header but keep the last 2 bytes (ethertype) */
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	/* re-insert HT control / QoS control in front of the payload */
	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	/* 3-address headers are 6 bytes (addr4) shorter than the struct */
	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
247
248
/* Decode the P-RXV rate words into mac80211 rx status fields: encoding,
 * rate index, NSS, guard interval, DCM, STBC and bandwidth.
 *
 * @rxv:  pointer to the P-RXV words in the RX descriptor
 * @mode: out parameter, receives the raw MT_PHY_TYPE_* TX mode
 *
 * Returns 0 on success, -EINVAL on an out-of-range rate index, unknown
 * PHY mode or unknown bandwidth.
 */
static int
mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	/* keep the raw index in idx; i is remapped per-encoding below */
	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		/* legacy rates: translate HW index to sband bitrate index */
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		/* HE/EHT MCS is the low 4 bits of the rate field */
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		/* NOTE(review): bitwise '&' on a non-bitmask enum value —
		 * this matches several other modes too; likely intended
		 * '== MT_PHY_TYPE_HE_EXT_SU'. Same form exists in sibling
		 * MediaTek drivers, so left untouched — confirm upstream.
		 */
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	default:
		return -EINVAL;
	}

	/* RX_ENC_FLAG_STBC_MASK * stbc sets the STBC flag bits iff stbc */
	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}
352
353
/* Parse a received frame's RX descriptor (RXD) and optional RXD groups,
 * filling the mt76_rx_status in skb->cb and stripping the descriptor and
 * padding from the skb so it starts at the (802.11 or 802.3) header.
 *
 * Returns 0 on success, -EINVAL on malformed/oversized descriptors or
 * frames received while the PHY is not running.
 */
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	bool hdr_trans, unicast, insert_ccmp_hdr = false;
	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
	u16 hdr_gap;
	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt792x_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	/* csum status must be read from skb->cb before the memset below */
	u32 csum_status = *(u32 *)skb->cb;
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	struct mt792x_link_sta *mlink;
	u8 mode = 0; /* , band_idx; */
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
		mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
	}

	mt792x_get_status_freq_info(status, chfreq);

	switch (status->band) {
	case NL80211_BAND_5GHZ:
		sband = &mphy->sband_5g.sband;
		break;
	case NL80211_BAND_6GHZ:
		sband = &mphy->sband_6g.sband;
		break;
	default:
		sband = &mphy->sband_2g.sband;
		break;
	}

	if (!sband->channels)
		return -EINVAL;

	/* offload checksum verification when HW validated both IP and
	 * TCP/UDP checksums and reports no error bits
	 */
	if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the 8-word base descriptor; each optional group that
	 * follows advances rxd and is bounds-checked against skb->len
	 */
	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		/* TODO: need to map rxd address */
		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				/* IV bytes are stored in reverse order */
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* skip ref 0 when the counter wraps */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7925_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* strip descriptor + alignment padding; fragmented frames must be
	 * converted back to 802.11 so mac80211 can reassemble them
	 */
	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7925_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
	}

	mt792x_mac_assoc_rssi(dev, skb);

	/* radiotap decode needs the raw P-RXV; only for 802.11 frames */
	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
	status->qos_ctl = qos_ctl;

	return 0;
}
620
621
static void
622
mt7925_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
623
struct mt76_wcid *wcid)
624
{
625
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
626
u8 fc_type, fc_stype;
627
u16 ethertype;
628
bool wmm = false;
629
u32 val;
630
631
if (wcid->sta) {
632
struct ieee80211_sta *sta;
633
634
sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
635
wmm = sta->wme;
636
}
637
638
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
639
FIELD_PREP(MT_TXD1_TID, tid);
640
641
ethertype = get_unaligned_be16(&skb->data[12]);
642
if (ethertype >= ETH_P_802_3_MIN)
643
val |= MT_TXD1_ETH_802_3;
644
645
txwi[1] |= cpu_to_le32(val);
646
647
fc_type = IEEE80211_FTYPE_DATA >> 2;
648
fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
649
650
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
651
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
652
653
txwi[2] |= cpu_to_le32(val);
654
}
655
656
/* Fill the 802.11 specific TXD fields: header format and length, TID
 * (with special values for ADDBA requests and management frames), fixed
 * rate selection, BIP protection, frame type/subtype and, for injected
 * frames, an explicit sequence number.
 */
static void
mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests and other management frames use reserved TIDs */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	/* header length is stored in units of 2 bytes */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	/* BIP-protected group management frames: HW does the BIP, so
	 * clear the generic protect-frame bit set by the caller
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc))
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);

	/* injected frames carry their own sequence number (from the BAR
	 * start sequence for BlockAck requests); disable HW A-MSDU since
	 * aggregation would break the explicit SN
	 */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}
722
723
/* Build the common TX descriptor (TXD words 0-7) for a frame, then
 * delegate the 802.3 / 802.11 specific fields to the helpers above.
 *
 * @pid:     packet ID; >= MT_PACKET_ID_FIRST requests host TX status
 * @qid:     mt76 software queue the frame was submitted on
 * @changed: BSS_CHANGED_* bits, used to detect beacon / in-band
 *           discovery frames which go out via the firmware path
 */
void
mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_key_conf *key, int pid,
		      enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
	u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mvif;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	struct mt792x_bss_conf *mconf;

	/* resolve the per-link BSS config for MLO-aware vifs */
	mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
					 wcid->link_id) : NULL;
	mvif = mconf ? (struct mt76_vif_link *)&mconf->mt76 : NULL;

	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	/* pick packet format and LMAC queue: FW path for beacons and
	 * in-band discovery, ALTX for PSD, per-AC queues otherwise
	 */
	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));

		/* counting non-offloading skbs */
		wcid->stats.tx_bytes += skb->len;
		wcid->stats.tx_packets++;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);

	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	/* host TX status reporting disables BA / HW A-MSDU for the frame */
	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST) {
		val |= MT_TXD5_TX_STATUS_HOST;
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	txwi[5] = cpu_to_le32(val);

	/* disable MAC address translation for non-MLD vifs and for
	 * frames on the ALTX/BCN management queues
	 */
	val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	if (!ieee80211_vif_is_mld(vif) ||
	    (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))
		val |= MT_TXD6_DIS_MAT;
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7925_mac_write_txwi_8023(txwi, skb, wcid);
	else
		mt7925_mac_write_txwi_80211(dev, txwi, skb, key);

	/* for fixed-rate frames, pick the rate table index and disable BA */
	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT792x_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi);
837
838
static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb,
839
struct mt76_wcid *wcid)
840
{
841
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
842
struct ieee80211_link_sta *link_sta;
843
struct mt792x_link_sta *mlink;
844
struct mt792x_sta *msta;
845
bool is_8023;
846
u16 fc, tid;
847
848
link_sta = rcu_dereference(sta->link[wcid->link_id]);
849
if (!link_sta)
850
return;
851
852
if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
853
return;
854
855
tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
856
is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
857
858
if (is_8023) {
859
fc = IEEE80211_FTYPE_DATA |
860
(sta->wme ? IEEE80211_STYPE_QOS_DATA :
861
IEEE80211_STYPE_DATA);
862
} else {
863
/* No need to get precise TID for Action/Management Frame,
864
* since it will not meet the following Frame Control
865
* condition anyway.
866
*/
867
868
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
869
870
fc = le16_to_cpu(hdr->frame_control) &
871
(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
872
}
873
874
if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
875
return;
876
877
msta = (struct mt792x_sta *)sta->drv_priv;
878
879
if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED)
880
mlink = rcu_dereference(msta->link[msta->deflink_id]);
881
else
882
mlink = &msta->deflink;
883
884
if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state))
885
ieee80211_start_tx_ba_session(sta, tid, 0);
886
}
887
888
/* Match a TXS (TX status) event to its pending skb (by wcid + packet
 * id), report ACK status to mac80211 and update the cached TX rate and
 * per-station counters from the TXS rate word.
 *
 * Returns true if a matching skb was found and completed.
 */
static bool
mt7925_mac_add_txs_skb(struct mt792x_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	/* rate table entries are not used for status reporting */
	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* STBC doubles the reported stream count; undo for stats */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	/* translate the raw TXS rate word into a mac80211 rate_info */
	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		/* GI is not in TXS; use the value cached by sta polling */
		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
1021
1022
void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
1023
{
1024
struct mt792x_link_sta *mlink = NULL;
1025
struct mt76_wcid *wcid;
1026
__le32 *txs_data = data;
1027
u16 wcidx;
1028
u8 pid;
1029
1030
if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
1031
return;
1032
1033
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1034
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1035
1036
if (pid < MT_PACKET_ID_FIRST)
1037
return;
1038
1039
if (wcidx >= MT792x_WTBL_SIZE)
1040
return;
1041
1042
rcu_read_lock();
1043
1044
wcid = mt76_wcid_ptr(dev, wcidx);
1045
if (!wcid)
1046
goto out;
1047
1048
mlink = container_of(wcid, struct mt792x_link_sta, wcid);
1049
1050
mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data);
1051
if (!wcid->sta)
1052
goto out;
1053
1054
mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
1055
1056
out:
1057
rcu_read_unlock();
1058
}
1059
1060
void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
1061
struct ieee80211_sta *sta, struct mt76_wcid *wcid,
1062
struct list_head *free_list)
1063
{
1064
struct mt76_dev *mdev = &dev->mt76;
1065
__le32 *txwi;
1066
u16 wcid_idx;
1067
1068
mt76_connac_txp_skb_unmap(mdev, t);
1069
if (!t->skb)
1070
goto out;
1071
1072
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
1073
if (sta) {
1074
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
1075
mt7925_tx_check_aggr(sta, t->skb, wcid);
1076
1077
wcid_idx = wcid->idx;
1078
} else {
1079
wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
1080
}
1081
1082
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
1083
out:
1084
t->skb = NULL;
1085
mt76_put_txwi(mdev, t);
1086
}
1087
EXPORT_SYMBOL_GPL(mt7925_txwi_free);
1088
1089
/* Process a TX-free event from the hardware: walk the list of released
 * MSDU tokens, complete their skbs, update per-wcid retry/fail counters
 * and kick the TX worker again.
 *
 * The event is a sequence of 32-bit words after a 2-word header; each
 * word is either a new "wcid pair" (selects the station the following
 * tokens belong to), a per-wcid stats word, or up to two 15-bit MSDU
 * token ids.
 */
static void
mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	void *end = data + len;
#elif defined(__FreeBSD__)
	void *end = (u8 *)data + len;
#endif
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* only event format version >= 4 is supported */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt792x_link_sta *mlink;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			mlink = container_of(wcid, struct mt792x_link_sta, wcid);
			mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
			continue;
		}

		/* stats word: retry count and ack-failure for current wcid */
		if (info & MT_TXFREE_INFO_HEADER) {
			if (wcid) {
				wcid->stats.tx_retries +=
					FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
				wcid->stats.tx_failed +=
					!!FIELD_GET(MT_TXFREE_INFO_STAT, info);
			}
			continue;
		}

		/* up to two 15-bit MSDU tokens; all-ones means unused slot */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7925_txwi_free(dev, txwi, sta, wcid, &free_list);
		}
	}

	mt7925_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1176
1177
bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len)
1178
{
1179
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
1180
__le32 *rxd = (__le32 *)data;
1181
__le32 *end = (__le32 *)&rxd[len / 4];
1182
enum rx_pkt_type type;
1183
1184
type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1185
if (type != PKT_TYPE_NORMAL) {
1186
u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1187
1188
if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1189
MT_RXD0_SW_PKT_TYPE_FRAME))
1190
return true;
1191
}
1192
1193
switch (type) {
1194
case PKT_TYPE_TXRX_NOTIFY:
1195
/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
1196
mt7925_mac_tx_free(dev, data, len); /* mmio */
1197
return false;
1198
case PKT_TYPE_TXS:
1199
for (rxd += 4; rxd + 12 <= end; rxd += 12)
1200
mt7925_mac_add_txs(dev, rxd);
1201
return false;
1202
default:
1203
return true;
1204
}
1205
}
1206
EXPORT_SYMBOL_GPL(mt7925_rx_check);
1207
1208
/* Dispatch one received skb by hardware packet type.
 *
 * Normal frames are decoded via mt7925_mac_fill_rx() and handed to
 * mt76_rx(); all other types (TX-free, MCU events, TXS) are consumed
 * here and the skb is freed. Ownership of @skb always transfers to
 * this function.
 */
void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* software-tagged frames are treated as normal RX */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	/* flag 0x1 marks an RX event that actually carries MCU-path
	 * normal data
	 */
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7925_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		/* mt7925_mcu_rx_event() takes over the skb */
		mt7925_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* 8-word TXS records follow a 2-word header; note the
		 * stride differs from the mt7925_rx_check() path
		 */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7925_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7925_mac_fill_rx(dev, skb)) {
			/* skb now owned by the mt76 RX machinery */
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb);
1257
1258
/* Interface iterator run after a full chip reset: re-create per-link
 * firmware state for one vif.
 *
 * Station interfaces are told to disconnect so mac80211 re-associates
 * cleanly; AP interfaces have their BSS, station table entry and beacon
 * offload re-added. For MLD vifs every valid link is reprogrammed,
 * otherwise only link 0.
 */
static void
mt7925_vif_connect_iter(void *priv, u8 *mac,
			struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	unsigned long valid = ieee80211_vif_is_mld(vif) ?
			      mvif->valid_links : BIT(0);
	struct mt792x_dev *dev = mvif->phy->dev;
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct ieee80211_bss_conf *bss_conf;
	struct mt792x_bss_conf *mconf;
	int i;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_disconnect(vif, true);

	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
		bss_conf = mt792x_vif_to_bss_conf(vif, i);
		mconf = mt792x_vif_to_link(mvif, i);

		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
					    &mvif->sta.deflink.wcid, true);
		mt7925_mcu_set_tx(dev, bss_conf);
	}

	if (vif->type == NL80211_IFTYPE_AP) {
		mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
					    true, NULL);
		mt7925_mcu_sta_update(dev, NULL, vif, true,
				      MT76_STA_INFO_STATE_NONE);
		mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true);
	}
}
1291
1292
/* system error recovery */

/* Full-chip recovery worker.
 *
 * Stops all mac80211 queues and pending driver work, retries the
 * low-level device reset up to 10 times, aborts any in-flight scan,
 * then restores per-interface firmware state via
 * mt7925_vif_connect_iter() and resets the regulatory domain to world
 * ("00").
 */
void mt7925_mac_reset_work(struct work_struct *work)
{
	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, ret;

	dev_dbg(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	/* quiesce periodic and power-save work before touching the chip */
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	for (i = 0; i < 10; i++) {
		mutex_lock(&dev->mt76.mutex);
		ret = mt792x_dev_reset(dev);
		mutex_unlock(&dev->mt76.mutex);

		if (!ret)
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	/* a scan interrupted by the reset must be reported as aborted */
	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	pm->suspended = false;
	ieee80211_wake_queues(hw);
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7925_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);

	mt7925_regd_change(&dev->phy, "00");
}
1339
1340
/* Assemble firmware coredump fragments into one buffer and hand it to
 * the devcoredump framework, then reset the device.
 *
 * If coredump messages were still arriving recently, the work is
 * re-queued to wait for the stream to go quiet. Fragments that do not
 * fit in MT76_CONNAC_COREDUMP_SZ (or arrive when allocation failed)
 * are dropped; the queue is always drained.
 */
void mt7925_coredump_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	char *dump, *data;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						coredump.work.work);

	/* still receiving fragments: back off and retry later */
	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		/* strip the MCU RX header plus 8 bytes of event header */
		skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 8);
		/* drop the fragment on alloc failure or buffer overflow,
		 * but keep draining the queue
		 */
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	/* dev_coredumpv() takes ownership of the vzalloc'ed buffer */
	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt792x_reset(&dev->mt76);
}
1386
1387
/* usb_sdio */
1388
static void
1389
mt7925_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid,
1390
enum mt76_txq_id qid, struct ieee80211_sta *sta,
1391
struct ieee80211_key_conf *key, int pid,
1392
struct sk_buff *skb)
1393
{
1394
__le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
1395
1396
memset(txwi, 0, MT_SDIO_TXD_SIZE);
1397
mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
1398
skb_push(skb, MT_SDIO_TXD_SIZE);
1399
}
1400
1401
/* Prepare an skb for transmission over the USB/SDIO bus.
 *
 * Allocates a TX-status packet id, writes the SDIO TXWI, prepends the
 * bus header and pads the frame to a 4-byte boundary (USB adds 4 extra
 * pad bytes). Periodically requests TX status reports per station
 * (at most ~4 times per second) to keep rate statistics fresh.
 *
 * Returns 0 on success or a negative errno; on padding failure the
 * allocated packet id is released again.
 */
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
				   struct ieee80211_sta *sta,
				   struct mt76_tx_info *tx_info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct sk_buff *skb = tx_info->skb;
	int err, pad, pktid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;

		/* rate-limit TX status requests to once per HZ/4 */
		if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->deflink.last_txs = jiffies;
		}
	}

	pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
	mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);

	mt792x_skb_add_usb_sdio_hdr(dev, skb, 0);
	pad = round_up(skb->len, 4) - skb->len;
	if (mt76_is_usb(mdev))
		pad += 4;

	err = mt76_skb_adjust_pad(skb, pad);
	if (err)
		/* Release pktid in case of error. */
		idr_remove(&wcid->pktid, pktid);

	return err;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb);
1443
1444
/* Completion handler for frames sent over USB/SDIO.
 *
 * Recovers the station from the WLAN index stored in the TXWI (which
 * sits after the bus header), runs BA-session tracking for non-EAPOL
 * frames, strips the driver-added headers and completes the skb.
 */
void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
				     struct mt76_queue_entry *e)
{
	__le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
	unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid;
	u16 idx;

	idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	wcid = __mt76_wcid_ptr(mdev, idx);
	sta = wcid_to_sta(wcid);

	/* EAPOL frames must not trigger aggregation setup */
	if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7925_tx_check_aggr(sta, e->skb, wcid);

	/* drop the SDIO bus header and the TX descriptor */
	skb_pull(e->skb, headroom);
	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb);
1464
1465
/* mt76 tx_status_data hook for USB/SDIO devices.
 *
 * Polls the per-station WTBL counters under the device mutex. Always
 * returns false: no further status data is pending from this callback.
 */
bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt792x_dev *mdev_priv;

	mdev_priv = container_of(mdev, struct mt792x_dev, mt76);

	mt792x_mutex_acquire(mdev_priv);
	mt7925_mac_sta_poll(mdev_priv);
	mt792x_mutex_release(mdev_priv);

	return false;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data);
1476
1477
#if IS_ENABLED(CONFIG_IPV6)
1478
/* Worker that pushes queued IPv6 neighbour-solicitation offload
 * messages to the firmware.
 *
 * Each queued skb is sent as a MCU_UNI_CMD(OFFLOAD) command under the
 * device mutex. On the first send failure the remaining queue is
 * purged, since later messages would likely fail the same way.
 */
void mt7925_set_ipv6_ns_work(struct work_struct *work)
{
	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
					      ipv6_ns_work);
	struct sk_buff *skb;
	int ret = 0;

	do {
		skb = skb_dequeue(&dev->ipv6_ns_list);

		if (!skb)
			break;

		/* mt76_mcu_skb_send_msg() consumes the skb */
		mt792x_mutex_acquire(dev);
		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_UNI_CMD(OFFLOAD), true);
		mt792x_mutex_release(dev);

	} while (!ret);

	if (ret)
		skb_queue_purge(&dev->ipv6_ns_list);
}
1501
#endif
1502
1503