GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/mediatek/mt76/mt7925/mac.c
// SPDX-License-Identifier: ISC
/* Copyright (C) 2023 MediaTek Inc. */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7925.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

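/* Per-station WTBL poll: accumulate the per-AC TX/RX airtime counters
 * and report them to mac80211, refresh the cached rate GI flags from
 * the WTBL rate fields, and sample the RSSI of response frames
 * (CTS/BA/ACK) for the ack-signal average. Register offsets follow the
 * layout assumed by the accesses below.
 */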
static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt792x_sta *msta;
	struct mt792x_link_sta *mlink;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct rate_info *rate;
	s8 rssi[4];
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		u8 bw;

		if (list_empty(&sta_poll_list))
			break;
		mlink = list_first_entry(&sta_poll_list,
					 struct mt792x_link_sta, wcid.poll_list);
		msta = mlink->sta;
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		list_del_init(&mlink->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = mlink->wcid.idx;
		addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = mlink->airtime_ac[i];
			u32 rx_last = mlink->airtime_ac[i + 4];

			mlink->airtime_ac[i] = mt76_rr(dev, addr);
			mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = mlink->airtime_ac[i] - tx_last;
			rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7925_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
		}

		if (!mlink->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/* We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure that flags match, so we poll GI
		 * from the per-sta counters directly.
		 */
		rate = &mlink->wcid.rate;

		switch (rate->bw) {
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 6);
		val = mt76_rr(dev, addr);
		if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
			addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 5);
			val = mt76_rr(dev, addr);
			rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
		} else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
	}
}

void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev,
				     u8 tbl_idx, u16 rate_idx)
{
	u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;

	mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
	/* use wtbl spe idx */
	mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
	mt76_wr(dev, MT_WTBL_ITCR, ctrl);
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7925_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt792x_sta *msta = (struct mt792x_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

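/* Decode the P-RXV (RXD group 3) rate words into the mt76 RX status:
 * rate index, NSS, STBC, guard interval, DCM and bandwidth, using the
 * MT_PRXV_* field layout referenced below.
 */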
static int
mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

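/* Parse one receive descriptor into skb->cb (struct mt76_rx_status).
 * The fixed rxd[0..7] words are followed by optional groups 4, 1, 2, 3
 * and 5, whose presence is flagged in rxd[1]; each group is decoded or
 * skipped in turn, and the 802.11 or 802.3 header is then recovered
 * depending on whether hardware header translation was applied.
 */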
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	bool hdr_trans, unicast, insert_ccmp_hdr = false;
	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
	u16 hdr_gap;
	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt792x_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	u32 csum_status = *(u32 *)skb->cb;
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	struct mt792x_link_sta *mlink;
	u8 mode = 0; /* , band_idx; */
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
		mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
	}

	mt792x_get_status_freq_info(status, chfreq);

	switch (status->band) {
	case NL80211_BAND_5GHZ:
		sband = &mphy->sband_5g.sband;
		break;
	case NL80211_BAND_6GHZ:
		sband = &mphy->sband_6g.sband;
		break;
	default:
		sband = &mphy->sband_2g.sband;
		break;
	}

	if (!sband->channels)
		return -EINVAL;

	if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		/* TODO: need to map rxd address */
		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7925_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7925_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
	}

	mt792x_mac_assoc_rssi(dev, skb);

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
	status->qos_ctl = qos_ctl;

	return 0;
}

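/* Fill the header-format dependent TXD fields for frames submitted
 * with hardware 802.11 encapsulation (i.e. still carrying an 802.3
 * header): header format, TID and frame type/subtype.
 */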
static void
mt7925_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
			   struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);
}

static void
mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc))
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

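/* Build the common TX descriptor (TXWI) for a frame: byte count,
 * packet format and LMAC queue, WLAN/OMAC indices, protection and ACK
 * policy, TX-status request, then the header-format specific fields
 * via the 802.3/802.11 helpers above. When MT_TXD1_FIXED_RATE ends up
 * set, a fixed rate table index taken from the vif is applied as well.
 */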
void
mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_key_conf *key, int pid,
		      enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
	u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mvif;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	struct mt792x_bss_conf *mconf;

	mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
					 wcid->link_id) : NULL;
	mvif = mconf ? (struct mt76_vif_link *)&mconf->mt76 : NULL;

	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));

		/* counting non-offloading skbs */
		wcid->stats.tx_bytes += skb->len;
		wcid->stats.tx_packets++;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);

	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST) {
		val |= MT_TXD5_TX_STATUS_HOST;
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	if (!ieee80211_vif_is_mld(vif) ||
	    (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))
		val |= MT_TXD6_DIS_MAT;
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7925_mac_write_txwi_8023(txwi, skb, wcid);
	else
		mt7925_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT792x_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi);

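/* On TX completion of a QoS data frame, start a BlockAck session for
 * the frame's TID if the peer supports HT/HE aggregation and no
 * session has been initiated on that TID yet (tracked in
 * wcid->ampdu_state).
 */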
static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb,
				 struct mt76_wcid *wcid)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_link_sta *link_sta;
	struct mt792x_link_sta *mlink;
	struct mt792x_sta *msta;
	bool is_8023;
	u16 fc, tid;

	link_sta = rcu_dereference(sta->link[wcid->link_id]);
	if (!link_sta)
		return;

	if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA :
		      IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt792x_sta *)sta->drv_priv;

	if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED)
		mlink = rcu_dereference(msta->link[msta->deflink_id]);
	else
		mlink = &msta->deflink;

	if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

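/* Match a TXS event against a pending status skb by packet id, report
 * the ACK result to mac80211 and translate the reported TX rate
 * (mode, MCS, NSS, GI, bandwidth) into wcid->rate and the per-station
 * counters.
 */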
static bool
mt7925_mac_add_txs_skb(struct mt792x_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
{
	struct mt792x_link_sta *mlink = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= MT792x_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mlink = container_of(wcid, struct mt792x_link_sta, wcid);

	mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data);
	if (!wcid->sta)
		goto out;

	mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);

out:
	rcu_read_unlock();
}

void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
		      struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		      struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7925_tx_check_aggr(sta, t->skb, wcid);

		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
EXPORT_SYMBOL_GPL(mt7925_txwi_free);

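/* Handle a TXFREE (TXRX_NOTIFY) event: walk the variable-length info
 * words, switching the current wcid on "pair" entries, accumulating
 * retry/failure statistics on header entries, and otherwise releasing
 * the tokens and skbs of the completed MSDUs.
 */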
static void
mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	void *end = data + len;
#elif defined(__FreeBSD__)
	void *end = (u8 *)data + len;
#endif
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt792x_link_sta *mlink;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			mlink = container_of(wcid, struct mt792x_link_sta, wcid);
			mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER) {
			if (wcid) {
				wcid->stats.tx_retries +=
					FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
				wcid->stats.tx_failed +=
					!!FIELD_GET(MT_TXFREE_INFO_STAT, info);
			}
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7925_txwi_free(dev, txwi, sta, wcid, &free_list);
		}
	}

	mt7925_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

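/* Early RX-path check: consume TXRX_NOTIFY and TXS events directly and
 * return false so the buffer is not queued as a frame; anything else
 * is left for mt7925_queue_rx_skb().
 */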
bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7925_mac_tx_free(dev, data, len); /* mmio */
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 12 <= end; rxd += 12)
			mt7925_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}
EXPORT_SYMBOL_GPL(mt7925_rx_check);

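/* Main RX dispatch: classify the buffer by MT_RXD0_PKT_TYPE, hand data
 * frames to mt76_rx() after mt7925_mac_fill_rx(), route MCU events and
 * TXS/TXFREE reports to their handlers, and drop anything else.
 */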
void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7925_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7925_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 12 <= end; rxd += 12)
			mt7925_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7925_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb);

static void
mt7925_vif_connect_iter(void *priv, u8 *mac,
			struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	unsigned long valid = ieee80211_vif_is_mld(vif) ?
			      mvif->valid_links : BIT(0);
	struct mt792x_dev *dev = mvif->phy->dev;
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct ieee80211_bss_conf *bss_conf;
	struct mt792x_bss_conf *mconf;
	int i;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_disconnect(vif, true);

	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
		bss_conf = mt792x_vif_to_bss_conf(vif, i);
		mconf = mt792x_vif_to_link(mvif, i);

		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
					    &mvif->sta.deflink.wcid, true);
		mt7925_mcu_set_tx(dev, bss_conf);
	}

	if (vif->type == NL80211_IFTYPE_AP) {
		mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
					    true, NULL);
		mt7925_mcu_sta_update(dev, NULL, vif, true,
				      MT76_STA_INFO_STATE_NONE);
		mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true);
	}
}

/* system error recovery */
void mt7925_mac_reset_work(struct work_struct *work)
{
	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, ret;

	dev_dbg(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);
	dev->sar_inited = false;

	for (i = 0; i < 10; i++) {
		mutex_lock(&dev->mt76.mutex);
		ret = mt792x_dev_reset(dev);
		mutex_unlock(&dev->mt76.mutex);

		if (!ret)
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	pm->suspended = false;
	ieee80211_wake_queues(hw);
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7925_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);
}

void mt7925_coredump_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	char *dump, *data;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 8);
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt792x_reset(&dev->mt76);
}

/* usb_sdio */
static void
mt7925_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid,
			   enum mt76_txq_id qid, struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key, int pid,
			   struct sk_buff *skb)
{
	__le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);

	memset(txwi, 0, MT_SDIO_TXD_SIZE);
	mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
	skb_push(skb, MT_SDIO_TXD_SIZE);
}

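/* USB/SDIO TX preparation: allocate a packet id for status tracking,
 * prepend the SDIO TXWI and bus header, and pad the frame to the
 * 4-byte transfer granularity (plus an extra 4 bytes on USB, matching
 * the padding applied below).
 */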
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
				   struct ieee80211_sta *sta,
				   struct mt76_tx_info *tx_info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct sk_buff *skb = tx_info->skb;
	int err, pad, pktid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->deflink.last_txs = jiffies;
		}
	}

	pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
	mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);

	mt792x_skb_add_usb_sdio_hdr(dev, skb, 0);
	pad = round_up(skb->len, 4) - skb->len;
	if (mt76_is_usb(mdev))
		pad += 4;

	err = mt76_skb_adjust_pad(skb, pad);
	if (err)
		/* Release pktid in case of error. */
		idr_remove(&wcid->pktid, pktid);

	return err;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb);

void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
				     struct mt76_queue_entry *e)
{
	__le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
	unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid;
	u16 idx;

	idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	wcid = __mt76_wcid_ptr(mdev, idx);
	sta = wcid_to_sta(wcid);

	if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7925_tx_check_aggr(sta, e->skb, wcid);

	skb_pull(e->skb, headroom);
	mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb);

bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	mt792x_mutex_acquire(dev);
	mt7925_mac_sta_poll(dev);
	mt792x_mutex_release(dev);

	return false;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data);

#if IS_ENABLED(CONFIG_IPV6)
void mt7925_set_ipv6_ns_work(struct work_struct *work)
{
	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
					      ipv6_ns_work);
	struct sk_buff *skb;
	int ret = 0;

	do {
		skb = skb_dequeue(&dev->ipv6_ns_list);

		if (!skb)
			break;

		mt792x_mutex_acquire(dev);
		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_UNI_CMD(OFFLOAD), true);
		mt792x_mutex_release(dev);

	} while (!ret);

	if (ret)
		skb_queue_purge(&dev->ipv6_ns_list);
}
#endif