GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/mediatek/mt76/mt7615/mcu.c
1
// SPDX-License-Identifier: ISC
2
/* Copyright (C) 2019 MediaTek Inc.
3
*
4
* Author: Roy Luo <[email protected]>
5
* Ryder Lee <[email protected]>
6
*/
7
8
#include <linux/firmware.h>
9
#include "mt7615.h"
10
#include "mcu.h"
11
#include "mac.h"
12
#include "eeprom.h"
13
14
static bool prefer_offload_fw = true;
15
module_param(prefer_offload_fw, bool, 0644);
16
MODULE_PARM_DESC(prefer_offload_fw,
17
"Prefer client mode offload firmware (MT7663)");
18
19
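/* Header at the start of a ROM patch image; multi-byte fields are big-endian. */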
struct mt7615_patch_hdr {
20
char build_date[16];
21
char platform[4];
22
__be32 hw_sw_ver;
23
__be32 patch_ver;
24
__be16 checksum;
25
} __packed;
26
27
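/* Per-region trailer appended to the N9/CR4 RAM firmware images. */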
struct mt7615_fw_trailer {
28
__le32 addr;
29
u8 chip_id;
30
u8 feature_set;
31
u8 eco_code;
32
char fw_ver[10];
33
char build_date[15];
34
__le32 len;
35
} __packed;
36
37
#define FW_V3_COMMON_TAILER_SIZE 36
38
#define FW_V3_REGION_TAILER_SIZE 40
39
#define FW_START_OVERRIDE BIT(0)
40
#define FW_START_DLYCAL BIT(1)
41
#define FW_START_WORKING_PDA_CR4 BIT(2)
42
43
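/* MT7663 (v3 firmware) per-region descriptor found in the image tailer. */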
struct mt7663_fw_buf {
44
__le32 crc;
45
__le32 d_img_size;
46
__le32 block_size;
47
u8 rsv[4];
48
__le32 img_dest_addr;
49
__le32 img_size;
50
u8 feature_set;
51
};
52
53
#define MT7615_PATCH_ADDRESS 0x80000
54
#define MT7622_PATCH_ADDRESS 0x9c000
55
#define MT7663_PATCH_ADDRESS 0xdc000
56
57
#define N9_REGION_NUM 2
58
#define CR4_REGION_NUM 1
59
60
#define IMG_CRC_LEN 4
61
62
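/*
 * Prepend the MCU TX descriptor to @skb: a unified TXD for __MCU_CMD_FIELD_UNI
 * commands, a legacy one otherwise. A non-zero 4-bit sequence number is
 * assigned and optionally returned through @wait_seq so the response can be
 * matched in mt7615_mcu_parse_response().
 */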
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
63
int cmd, int *wait_seq)
64
{
65
int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
66
struct mt7615_uni_txd *uni_txd;
67
struct mt7615_mcu_txd *mcu_txd;
68
u8 seq, q_idx, pkt_fmt;
69
__le32 *txd;
70
u32 val;
71
72
/* TODO: make dynamic based on msg type */
73
dev->mt76.mcu.timeout = 20 * HZ;
74
75
seq = ++dev->mt76.mcu.msg_seq & 0xf;
76
if (!seq)
77
seq = ++dev->mt76.mcu.msg_seq & 0xf;
78
if (wait_seq)
79
*wait_seq = seq;
80
81
txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
82
txd = (__le32 *)skb_push(skb, txd_len);
83
84
if (cmd != MCU_CMD(FW_SCATTER)) {
85
q_idx = MT_TX_MCU_PORT_RX_Q0;
86
pkt_fmt = MT_TX_TYPE_CMD;
87
} else {
88
q_idx = MT_TX_MCU_PORT_RX_FWDL;
89
pkt_fmt = MT_TX_TYPE_FW;
90
}
91
92
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
93
FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
94
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
95
txd[0] = cpu_to_le32(val);
96
97
val = MT_TXD1_LONG_FORMAT |
98
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD) |
99
FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
100
txd[1] = cpu_to_le32(val);
101
102
if (cmd & __MCU_CMD_FIELD_UNI) {
103
uni_txd = (struct mt7615_uni_txd *)txd;
104
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
105
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
106
uni_txd->cid = cpu_to_le16(mcu_cmd);
107
uni_txd->s2d_index = MCU_S2D_H2N;
108
uni_txd->pkt_type = MCU_PKT_ID;
109
uni_txd->seq = seq;
110
111
return;
112
}
113
114
mcu_txd = (struct mt7615_mcu_txd *)txd;
115
mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
116
mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
117
mcu_txd->s2d_index = MCU_S2D_H2N;
118
mcu_txd->pkt_type = MCU_PKT_ID;
119
mcu_txd->seq = seq;
120
mcu_txd->cid = mcu_cmd;
121
mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd);
122
123
if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) {
124
if (cmd & __MCU_CMD_FIELD_QUERY)
125
mcu_txd->set_query = MCU_Q_QUERY;
126
else
127
mcu_txd->set_query = MCU_Q_SET;
128
mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid;
129
} else {
130
mcu_txd->set_query = MCU_Q_NA;
131
}
132
}
133
EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg);
134
135
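/*
 * Match a response buffer against the expected sequence number and extract
 * the command-specific status or value. A NULL @skb means the MCU timed out.
 */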
int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
136
struct sk_buff *skb, int seq)
137
{
138
struct mt7615_mcu_rxd *rxd;
139
int ret = 0;
140
141
if (!skb) {
142
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
143
cmd, seq);
144
return -ETIMEDOUT;
145
}
146
147
rxd = (struct mt7615_mcu_rxd *)skb->data;
148
if (seq != rxd->seq)
149
return -EAGAIN;
150
151
if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
152
skb_pull(skb, sizeof(*rxd) - 4);
153
ret = *skb->data;
154
} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
155
skb_pull(skb, sizeof(*rxd));
156
ret = le32_to_cpu(*(__le32 *)skb->data);
157
} else if (cmd == MCU_EXT_QUERY(RF_REG_ACCESS)) {
158
skb_pull(skb, sizeof(*rxd));
159
ret = le32_to_cpu(*(__le32 *)&skb->data[8]);
160
} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
161
cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
162
cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
163
cmd == MCU_UNI_CMD(HIF_CTRL) ||
164
cmd == MCU_UNI_CMD(OFFLOAD) ||
165
cmd == MCU_UNI_CMD(SUSPEND)) {
166
struct mt76_connac_mcu_uni_event *event;
167
168
skb_pull(skb, sizeof(*rxd));
169
event = (struct mt76_connac_mcu_uni_event *)skb->data;
170
ret = le32_to_cpu(event->status);
171
} else if (cmd == MCU_CE_QUERY(REG_READ)) {
172
struct mt76_connac_mcu_reg_event *event;
173
174
skb_pull(skb, sizeof(*rxd));
175
event = (struct mt76_connac_mcu_reg_event *)skb->data;
176
ret = (int)le32_to_cpu(event->val);
177
}
178
179
return ret;
180
}
181
EXPORT_SYMBOL_GPL(mt7615_mcu_parse_response);
182
183
static int
184
mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
185
int cmd, int *seq)
186
{
187
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
188
enum mt76_mcuq_id qid;
189
190
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
191
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
192
qid = MT_MCUQ_WM;
193
else
194
qid = MT_MCUQ_FWDL;
195
196
return mt76_tx_queue_skb_raw(dev, dev->mt76.q_mcu[qid], skb, 0);
197
}
198
199
u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
200
{
201
struct {
202
__le32 wifi_stream;
203
__le32 address;
204
__le32 data;
205
} req = {
206
.wifi_stream = cpu_to_le32(wf),
207
.address = cpu_to_le32(reg),
208
};
209
210
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS),
211
&req, sizeof(req), true);
212
}
213
214
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
215
{
216
struct {
217
__le32 wifi_stream;
218
__le32 address;
219
__le32 data;
220
} req = {
221
.wifi_stream = cpu_to_le32(wf),
222
.address = cpu_to_le32(reg),
223
.data = cpu_to_le32(val),
224
};
225
226
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS),
227
&req, sizeof(req), false);
228
}
229
230
void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
231
{
232
if (!is_mt7622(&dev->mt76))
233
return;
234
235
#if defined(__linux__)
236
regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
237
MT_INFRACFG_MISC_AP2CONN_WAKE,
238
!en * MT_INFRACFG_MISC_AP2CONN_WAKE);
239
#elif defined(__FreeBSD__)
240
panic("%s: LinuxKPI needs regmap\n", __func__);
241
#endif
242
}
243
EXPORT_SYMBOL_GPL(mt7622_trigger_hif_int);
244
245
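/* Request "driver own": wake the MCU out of its low-power state so the host
 * may access the device.
 */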
static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
246
{
247
struct mt76_phy *mphy = &dev->mt76.phy;
248
struct mt76_connac_pm *pm = &dev->pm;
249
struct mt76_dev *mdev = &dev->mt76;
250
u32 addr;
251
int err;
252
253
if (is_mt7663(mdev)) {
254
/* Clear firmware own via N9 eint */
255
mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
256
mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
257
258
addr = MT_CONN_HIF_ON_LPCTL;
259
} else {
260
addr = MT_CFG_LPCR_HOST;
261
}
262
263
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
264
265
mt7622_trigger_hif_int(dev, true);
266
267
err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
268
269
mt7622_trigger_hif_int(dev, false);
270
271
if (err) {
272
dev_err(mdev->dev, "driver own failed\n");
273
return -ETIMEDOUT;
274
}
275
276
clear_bit(MT76_STATE_PM, &mphy->state);
277
278
pm->stats.last_wake_event = jiffies;
279
pm->stats.doze_time += pm->stats.last_wake_event -
280
pm->stats.last_doze_event;
281
282
return 0;
283
}
284
285
static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
286
{
287
struct mt76_phy *mphy = &dev->mt76.phy;
288
struct mt76_connac_pm *pm = &dev->pm;
289
int i, err = 0;
290
291
mutex_lock(&pm->mutex);
292
293
if (!test_bit(MT76_STATE_PM, &mphy->state))
294
goto out;
295
296
for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
297
mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
298
if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
299
MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
300
break;
301
}
302
303
if (i == MT7615_DRV_OWN_RETRY_COUNT) {
304
dev_err(dev->mt76.dev, "driver own failed\n");
305
err = -EIO;
306
goto out;
307
}
308
clear_bit(MT76_STATE_PM, &mphy->state);
309
310
pm->stats.last_wake_event = jiffies;
311
pm->stats.doze_time += pm->stats.last_wake_event -
312
pm->stats.last_doze_event;
313
out:
314
mutex_unlock(&pm->mutex);
315
316
return err;
317
}
318
319
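/* Hand ownership back to the firmware ("fw own") so the chip may enter low power. */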
static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
320
{
321
struct mt76_phy *mphy = &dev->mt76.phy;
322
struct mt76_connac_pm *pm = &dev->pm;
323
int err = 0;
324
u32 addr;
325
326
mutex_lock(&pm->mutex);
327
328
if (mt76_connac_skip_fw_pmctrl(mphy, pm))
329
goto out;
330
331
mt7622_trigger_hif_int(dev, true);
332
333
addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
334
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
335
336
if (is_mt7622(&dev->mt76) &&
337
!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
338
MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
339
dev_err(dev->mt76.dev, "Timeout for firmware own\n");
340
clear_bit(MT76_STATE_PM, &mphy->state);
341
err = -EIO;
342
}
343
344
mt7622_trigger_hif_int(dev, false);
345
if (!err) {
346
pm->stats.last_doze_event = jiffies;
347
pm->stats.awake_time += pm->stats.last_doze_event -
348
pm->stats.last_wake_event;
349
}
350
out:
351
mutex_unlock(&pm->mutex);
352
353
return err;
354
}
355
356
static void
357
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
358
{
359
if (vif->bss_conf.csa_active)
360
ieee80211_csa_finish(vif, 0);
361
}
362
363
static void
364
mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
365
{
366
struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
367
struct mt76_phy *mphy = &dev->mt76.phy;
368
struct mt7615_mcu_csa_notify *c;
369
370
c = (struct mt7615_mcu_csa_notify *)skb->data;
371
372
if (c->omac_idx > EXT_BSSID_MAX)
373
return;
374
375
if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
376
mphy = dev->mt76.phys[MT_BAND1];
377
378
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
379
IEEE80211_IFACE_ITER_RESUME_ALL,
380
mt7615_mcu_csa_finish, mphy->hw);
381
}
382
383
static void
384
mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
385
{
386
struct mt76_phy *mphy = &dev->mt76.phy;
387
struct mt7615_mcu_rdd_report *r;
388
389
r = (struct mt7615_mcu_rdd_report *)skb->data;
390
391
if (!dev->radar_pattern.n_pulses && !r->long_detected &&
392
!r->constant_prf_detected && !r->staggered_prf_detected)
393
return;
394
395
if (r->band_idx && dev->mt76.phys[MT_BAND1])
396
mphy = dev->mt76.phys[MT_BAND1];
397
398
if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
399
return;
400
401
ieee80211_radar_detected(mphy->hw, NULL);
402
dev->hw_pattern++;
403
}
404
405
static void
406
mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
407
{
408
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
409
const char *data = (char *)&rxd[1];
410
const char *type;
411
412
switch (rxd->s2d_index) {
413
case 0:
414
type = "N9";
415
break;
416
case 2:
417
type = "CR4";
418
break;
419
default:
420
type = "unknown";
421
break;
422
}
423
424
wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
425
(int)(skb->len - sizeof(*rxd)), data);
426
}
427
428
static void
429
mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
430
{
431
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
432
433
switch (rxd->ext_eid) {
434
case MCU_EXT_EVENT_RDD_REPORT:
435
mt7615_mcu_rx_radar_detected(dev, skb);
436
break;
437
case MCU_EXT_EVENT_CSA_NOTIFY:
438
mt7615_mcu_rx_csa_notify(dev, skb);
439
break;
440
case MCU_EXT_EVENT_FW_LOG_2_HOST:
441
mt7615_mcu_rx_log_message(dev, skb);
442
break;
443
default:
444
break;
445
}
446
}
447
448
static void
449
mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb)
450
{
451
u8 *seq_num = skb->data + sizeof(struct mt7615_mcu_rxd);
452
struct mt7615_phy *phy;
453
struct mt76_phy *mphy;
454
455
if (*seq_num & BIT(7) && dev->mt76.phys[MT_BAND1])
456
mphy = dev->mt76.phys[MT_BAND1];
457
else
458
mphy = &dev->mt76.phy;
459
460
phy = mphy->priv;
461
462
spin_lock_bh(&dev->mt76.lock);
463
__skb_queue_tail(&phy->scan_event_list, skb);
464
spin_unlock_bh(&dev->mt76.lock);
465
466
ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
467
MT7615_HW_SCAN_TIMEOUT);
468
}
469
470
static void
471
mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
472
{
473
struct mt7615_roc_tlv *event;
474
struct mt7615_phy *phy;
475
struct mt76_phy *mphy;
476
int duration;
477
478
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
479
event = (struct mt7615_roc_tlv *)skb->data;
480
481
if (event->dbdc_band && dev->mt76.phys[MT_BAND1])
482
mphy = dev->mt76.phys[MT_BAND1];
483
else
484
mphy = &dev->mt76.phy;
485
486
ieee80211_ready_on_channel(mphy->hw);
487
488
phy = mphy->priv;
489
phy->roc_grant = true;
490
wake_up(&phy->roc_wait);
491
492
duration = le32_to_cpu(event->max_interval);
493
mod_timer(&phy->roc_timer,
494
round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
495
}
496
497
static void
498
mt7615_mcu_beacon_loss_event(struct mt7615_dev *dev, struct sk_buff *skb)
499
{
500
struct mt76_connac_beacon_loss_event *event;
501
struct mt76_phy *mphy;
502
u8 band_idx = 0; /* DBDC support */
503
504
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
505
event = (struct mt76_connac_beacon_loss_event *)skb->data;
506
if (band_idx && dev->mt76.phys[MT_BAND1])
507
mphy = dev->mt76.phys[MT_BAND1];
508
else
509
mphy = &dev->mt76.phy;
510
511
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
512
IEEE80211_IFACE_ITER_RESUME_ALL,
513
mt76_connac_mcu_beacon_loss_iter,
514
event);
515
}
516
517
static void
518
mt7615_mcu_bss_event(struct mt7615_dev *dev, struct sk_buff *skb)
519
{
520
struct mt76_connac_mcu_bss_event *event;
521
struct mt76_phy *mphy;
522
u8 band_idx = 0; /* DBDC support */
523
524
skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
525
event = (struct mt76_connac_mcu_bss_event *)skb->data;
526
527
if (band_idx && dev->mt76.phys[MT_BAND1])
528
mphy = dev->mt76.phys[MT_BAND1];
529
else
530
mphy = &dev->mt76.phy;
531
532
if (event->is_absent)
533
ieee80211_stop_queues(mphy->hw);
534
else
535
ieee80211_wake_queues(mphy->hw);
536
}
537
538
static void
539
mt7615_mcu_rx_unsolicited_event(struct mt7615_dev *dev, struct sk_buff *skb)
540
{
541
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
542
543
switch (rxd->eid) {
544
case MCU_EVENT_EXT:
545
mt7615_mcu_rx_ext_event(dev, skb);
546
break;
547
case MCU_EVENT_BSS_BEACON_LOSS:
548
mt7615_mcu_beacon_loss_event(dev, skb);
549
break;
550
case MCU_EVENT_ROC:
551
mt7615_mcu_roc_event(dev, skb);
552
break;
553
case MCU_EVENT_SCHED_SCAN_DONE:
554
case MCU_EVENT_SCAN_DONE:
555
mt7615_mcu_scan_event(dev, skb);
556
return;
557
case MCU_EVENT_BSS_ABSENCE:
558
mt7615_mcu_bss_event(dev, skb);
559
break;
560
case MCU_EVENT_COREDUMP:
561
mt76_connac_mcu_coredump_event(&dev->mt76, skb,
562
&dev->coredump);
563
return;
564
default:
565
break;
566
}
567
dev_kfree_skb(skb);
568
}
569
570
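/*
 * Entry point for MCU RX: unsolicited events are handled here, command
 * responses are queued for the waiting caller via mt76_mcu_rx_event().
 */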
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb)
571
{
572
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
573
574
if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
575
rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
576
rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
577
rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
578
rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
579
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
580
rxd->eid == MCU_EVENT_BSS_ABSENCE ||
581
rxd->eid == MCU_EVENT_SCAN_DONE ||
582
rxd->eid == MCU_EVENT_COREDUMP ||
583
rxd->eid == MCU_EVENT_ROC ||
584
!rxd->seq)
585
mt7615_mcu_rx_unsolicited_event(dev, skb);
586
else
587
mt76_mcu_rx_event(&dev->mt76, skb);
588
}
589
590
static int
591
mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif,
592
bool bssid, bool enable)
593
{
594
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
595
u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
596
u32 mask = dev->omac_mask >> 32 & ~BIT(idx);
597
const u8 *addr = vif->addr;
598
struct {
599
u8 mode;
600
u8 force_clear;
601
u8 clear_bitmap[8];
602
u8 entry_count;
603
u8 write;
604
605
u8 index;
606
u8 bssid;
607
u8 addr[ETH_ALEN];
608
} __packed req = {
609
.mode = !!mask || enable,
610
.entry_count = 1,
611
.write = 1,
612
613
.index = idx * 2 + bssid,
614
};
615
616
if (bssid)
617
addr = vif->bss_conf.bssid;
618
619
if (enable)
620
ether_addr_copy(req.addr, addr);
621
622
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE),
623
&req, sizeof(req), true);
624
}
625
626
static int
627
mt7615_mcu_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
628
bool enable)
629
{
630
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
631
struct mt7615_dev *dev = phy->dev;
632
struct {
633
struct req_hdr {
634
u8 omac_idx;
635
u8 band_idx;
636
__le16 tlv_num;
637
u8 is_tlv_append;
638
u8 rsv[3];
639
} __packed hdr;
640
struct req_tlv {
641
__le16 tag;
642
__le16 len;
643
u8 active;
644
u8 band_idx;
645
u8 omac_addr[ETH_ALEN];
646
} __packed tlv;
647
} data = {
648
.hdr = {
649
.omac_idx = mvif->mt76.omac_idx,
650
.band_idx = mvif->mt76.band_idx,
651
.tlv_num = cpu_to_le16(1),
652
.is_tlv_append = 1,
653
},
654
.tlv = {
655
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
656
.len = cpu_to_le16(sizeof(struct req_tlv)),
657
.active = enable,
658
.band_idx = mvif->mt76.band_idx,
659
},
660
};
661
662
if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
663
return mt7615_mcu_muar_config(dev, vif, false, enable);
664
665
memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
666
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE),
667
&data, sizeof(data), true);
668
}
669
670
static int
671
mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
672
struct ieee80211_hw *hw,
673
struct ieee80211_vif *vif, bool enable)
674
{
675
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
676
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
677
struct ieee80211_mutable_offsets offs;
678
struct ieee80211_tx_info *info;
679
struct req {
680
u8 omac_idx;
681
u8 enable;
682
u8 wlan_idx;
683
u8 band_idx;
684
u8 pkt_type;
685
u8 need_pre_tbtt_int;
686
__le16 csa_ie_pos;
687
__le16 pkt_len;
688
__le16 tim_ie_pos;
689
u8 pkt[512];
690
u8 csa_cnt;
691
/* bss color change */
692
u8 bcc_cnt;
693
__le16 bcc_ie_pos;
694
} __packed req = {
695
.omac_idx = mvif->mt76.omac_idx,
696
.enable = enable,
697
.wlan_idx = wcid->idx,
698
.band_idx = mvif->mt76.band_idx,
699
};
700
struct sk_buff *skb;
701
702
if (!enable)
703
goto out;
704
705
skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
706
if (!skb)
707
return -EINVAL;
708
709
if (skb->len > 512 - MT_TXD_SIZE) {
710
dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
711
dev_kfree_skb(skb);
712
return -EINVAL;
713
}
714
715
info = IEEE80211_SKB_CB(skb);
716
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, mvif->mt76.band_idx);
717
718
mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
719
0, NULL, 0, true);
720
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
721
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
722
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
723
if (offs.cntdwn_counter_offs[0]) {
724
u16 csa_offs;
725
726
csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
727
req.csa_ie_pos = cpu_to_le16(csa_offs);
728
req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]];
729
}
730
dev_kfree_skb(skb);
731
732
out:
733
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(BCN_OFFLOAD), &req,
734
sizeof(req), true);
735
}
736
737
static int
738
mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
739
{
740
return mt76_connac_mcu_set_pm(&dev->mt76, band, state);
741
}
742
743
static int
744
mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
745
struct ieee80211_sta *sta, bool enable)
746
{
747
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
748
struct mt7615_dev *dev = phy->dev;
749
struct sk_buff *skb;
750
751
if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
752
mt7615_mcu_muar_config(dev, vif, true, enable);
753
754
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL);
755
if (IS_ERR(skb))
756
return PTR_ERR(skb);
757
758
if (enable)
759
mt76_connac_mcu_bss_omac_tlv(skb, vif);
760
761
mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76,
762
mvif->sta.wcid.idx, enable);
763
764
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
765
mvif->mt76.omac_idx < REPEATER_BSSID_START)
766
mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
767
768
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
769
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
770
}
771
772
static int
773
mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
774
struct ieee80211_ampdu_params *params,
775
bool enable)
776
{
777
struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
778
struct mt7615_vif *mvif = msta->vif;
779
struct wtbl_req_hdr *wtbl_hdr;
780
struct sk_buff *skb = NULL;
781
int err;
782
783
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
784
WTBL_SET, NULL, &skb);
785
if (IS_ERR(wtbl_hdr))
786
return PTR_ERR(wtbl_hdr);
787
788
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, true,
789
NULL, wtbl_hdr);
790
791
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
792
MCU_EXT_CMD(WTBL_UPDATE), true);
793
if (err < 0)
794
return err;
795
796
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
797
&msta->wcid);
798
if (IS_ERR(skb))
799
return PTR_ERR(skb);
800
801
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, true);
802
803
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
804
MCU_EXT_CMD(STA_REC_UPDATE), true);
805
}
806
807
static int
808
mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
809
struct ieee80211_ampdu_params *params,
810
bool enable)
811
{
812
struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
813
struct mt7615_vif *mvif = msta->vif;
814
struct wtbl_req_hdr *wtbl_hdr;
815
struct sk_buff *skb;
816
int err;
817
818
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
819
&msta->wcid);
820
if (IS_ERR(skb))
821
return PTR_ERR(skb);
822
823
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false);
824
825
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
826
MCU_EXT_CMD(STA_REC_UPDATE), true);
827
if (err < 0 || !enable)
828
return err;
829
830
skb = NULL;
831
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
832
WTBL_SET, NULL, &skb);
833
if (IS_ERR(wtbl_hdr))
834
return PTR_ERR(wtbl_hdr);
835
836
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false,
837
NULL, wtbl_hdr);
838
839
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
840
MCU_EXT_CMD(WTBL_UPDATE), true);
841
}
842
843
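/*
 * v1 firmware station add/remove: on enable the WTBL entry must be written
 * before the STA record, on disable the order is reversed.
 */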
static int
844
mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
845
struct ieee80211_sta *sta, bool enable)
846
{
847
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
848
struct sk_buff *skb, *sskb, *wskb = NULL;
849
struct ieee80211_link_sta *link_sta;
850
struct mt7615_dev *dev = phy->dev;
851
struct wtbl_req_hdr *wtbl_hdr;
852
struct mt7615_sta *msta;
853
bool new_entry = true;
854
int conn_state;
855
int cmd, err;
856
857
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
858
link_sta = sta ? &sta->deflink : NULL;
859
860
sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
861
&msta->wcid);
862
if (IS_ERR(sskb))
863
return PTR_ERR(sskb);
864
865
if (!sta) {
866
if (mvif->sta_added)
867
new_entry = false;
868
else
869
mvif->sta_added = true;
870
}
871
conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
872
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, &vif->bss_conf,
873
link_sta, conn_state, new_entry);
874
if (enable && sta)
875
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
876
MT76_STA_INFO_STATE_ASSOC);
877
878
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
879
WTBL_RESET_AND_SET, NULL,
880
&wskb);
881
if (IS_ERR(wtbl_hdr))
882
return PTR_ERR(wtbl_hdr);
883
884
if (enable) {
885
mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, wskb, vif, sta,
886
NULL, wtbl_hdr);
887
if (sta)
888
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
889
NULL, wtbl_hdr, true, true);
890
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
891
NULL, wtbl_hdr);
892
}
893
894
cmd = enable ? MCU_EXT_CMD(WTBL_UPDATE) : MCU_EXT_CMD(STA_REC_UPDATE);
895
skb = enable ? wskb : sskb;
896
897
err = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
898
if (err < 0) {
899
skb = enable ? sskb : wskb;
900
dev_kfree_skb(skb);
901
902
return err;
903
}
904
905
cmd = enable ? MCU_EXT_CMD(STA_REC_UPDATE) : MCU_EXT_CMD(WTBL_UPDATE);
906
skb = enable ? sskb : wskb;
907
908
return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
909
}
910
911
static int
912
mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
913
struct ieee80211_vif *vif,
914
struct ieee80211_sta *sta)
915
{
916
return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
917
}
918
919
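/*
 * MCU op tables: wtbl_update_ops is used with the v1 (MT7615) firmware,
 * sta_update_ops with v2 and uni_update_ops with the v3 (MT7663 offload)
 * firmware; the choice is made in mt7615_load_n9()/mt7663_load_rom_patch().
 */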
static const struct mt7615_mcu_ops wtbl_update_ops = {
920
.add_beacon_offload = mt7615_mcu_add_beacon_offload,
921
.set_pm_state = mt7615_mcu_ctrl_pm_state,
922
.add_dev_info = mt7615_mcu_add_dev,
923
.add_bss_info = mt7615_mcu_add_bss,
924
.add_tx_ba = mt7615_mcu_wtbl_tx_ba,
925
.add_rx_ba = mt7615_mcu_wtbl_rx_ba,
926
.sta_add = mt7615_mcu_wtbl_sta_add,
927
.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
928
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
929
.set_sta_decap_offload = mt7615_mcu_wtbl_update_hdr_trans,
930
};
931
932
static int
933
mt7615_mcu_sta_ba(struct mt7615_dev *dev,
934
struct ieee80211_ampdu_params *params,
935
bool enable, bool tx)
936
{
937
struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
938
struct mt7615_vif *mvif = msta->vif;
939
struct wtbl_req_hdr *wtbl_hdr;
940
struct tlv *sta_wtbl;
941
struct sk_buff *skb;
942
943
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
944
&msta->wcid);
945
if (IS_ERR(skb))
946
return PTR_ERR(skb);
947
948
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
949
950
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
951
952
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
953
WTBL_SET, sta_wtbl, &skb);
954
if (IS_ERR(wtbl_hdr))
955
return PTR_ERR(wtbl_hdr);
956
957
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
958
sta_wtbl, wtbl_hdr);
959
960
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
961
MCU_EXT_CMD(STA_REC_UPDATE), true);
962
}
963
964
static int
965
mt7615_mcu_sta_tx_ba(struct mt7615_dev *dev,
966
struct ieee80211_ampdu_params *params,
967
bool enable)
968
{
969
return mt7615_mcu_sta_ba(dev, params, enable, true);
970
}
971
972
static int
973
mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
974
struct ieee80211_ampdu_params *params,
975
bool enable)
976
{
977
return mt7615_mcu_sta_ba(dev, params, enable, false);
978
}
979
980
static int
981
__mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
982
struct ieee80211_sta *sta, bool enable, int cmd,
983
bool offload_fw)
984
{
985
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
986
struct mt76_sta_cmd_info info = {
987
.sta = sta,
988
.vif = vif,
989
.offload_fw = offload_fw,
990
.enable = enable,
991
.newly = true,
992
.cmd = cmd,
993
};
994
995
info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
996
return mt76_connac_mcu_sta_cmd(phy, &info);
997
}
998
999
static int
1000
mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
1001
struct ieee80211_sta *sta, bool enable)
1002
{
1003
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
1004
MCU_EXT_CMD(STA_REC_UPDATE), false);
1005
}
1006
1007
static int
1008
mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
1009
struct ieee80211_vif *vif,
1010
struct ieee80211_sta *sta)
1011
{
1012
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
1013
1014
return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
1015
vif, &msta->wcid,
1016
MCU_EXT_CMD(STA_REC_UPDATE));
1017
}
1018
1019
static const struct mt7615_mcu_ops sta_update_ops = {
1020
.add_beacon_offload = mt7615_mcu_add_beacon_offload,
1021
.set_pm_state = mt7615_mcu_ctrl_pm_state,
1022
.add_dev_info = mt7615_mcu_add_dev,
1023
.add_bss_info = mt7615_mcu_add_bss,
1024
.add_tx_ba = mt7615_mcu_sta_tx_ba,
1025
.add_rx_ba = mt7615_mcu_sta_rx_ba,
1026
.sta_add = mt7615_mcu_add_sta,
1027
.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
1028
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
1029
.set_sta_decap_offload = mt7615_mcu_sta_update_hdr_trans,
1030
};
1031
1032
static int
1033
mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
1034
{
1035
return 0;
1036
}
1037
1038
static int
1039
mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
1040
struct ieee80211_hw *hw,
1041
struct ieee80211_vif *vif,
1042
bool enable)
1043
{
1044
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
1045
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
1046
struct ieee80211_mutable_offsets offs;
1047
struct {
1048
struct req_hdr {
1049
u8 bss_idx;
1050
u8 pad[3];
1051
} __packed hdr;
1052
struct bcn_content_tlv {
1053
__le16 tag;
1054
__le16 len;
1055
__le16 tim_ie_pos;
1056
__le16 csa_ie_pos;
1057
__le16 bcc_ie_pos;
1058
/* 0: disable beacon offload
1059
* 1: enable beacon offload
1060
* 2: update probe response offload
1061
*/
1062
u8 enable;
1063
/* 0: legacy format (TXD + payload)
1064
* 1: only cap field IE
1065
*/
1066
u8 type;
1067
__le16 pkt_len;
1068
u8 pkt[512];
1069
} __packed beacon_tlv;
1070
} req = {
1071
.hdr = {
1072
.bss_idx = mvif->mt76.idx,
1073
},
1074
.beacon_tlv = {
1075
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
1076
.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
1077
.enable = enable,
1078
},
1079
};
1080
struct sk_buff *skb;
1081
1082
if (!enable)
1083
goto out;
1084
1085
skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
1086
if (!skb)
1087
return -EINVAL;
1088
1089
if (skb->len > 512 - MT_TXD_SIZE) {
1090
dev_err(dev->mt76.dev, "beacon size limit exceed\n");
1091
dev_kfree_skb(skb);
1092
return -EINVAL;
1093
}
1094
1095
mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
1096
wcid, NULL, 0, NULL, 0, true);
1097
memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
1098
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
1099
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
1100
1101
if (offs.cntdwn_counter_offs[0]) {
1102
u16 csa_offs;
1103
1104
csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
1105
req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
1106
}
1107
dev_kfree_skb(skb);
1108
1109
out:
1110
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
1111
&req, sizeof(req), true);
1112
}
1113
1114
static int
1115
mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
1116
bool enable)
1117
{
1118
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
1119
1120
return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, &mvif->mt76,
1121
&mvif->sta.wcid, enable);
1122
}
1123
1124
static int
1125
mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
1126
struct ieee80211_sta *sta, bool enable)
1127
{
1128
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
1129
1130
return mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
1131
enable, NULL);
1132
}
1133
1134
static inline int
1135
mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
1136
struct ieee80211_sta *sta, bool enable)
1137
{
1138
return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
1139
MCU_UNI_CMD(STA_REC_UPDATE), true);
1140
}
1141
1142
static int
1143
mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
1144
struct ieee80211_ampdu_params *params,
1145
bool enable)
1146
{
1147
struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
1148
1149
return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
1150
MCU_UNI_CMD(STA_REC_UPDATE), enable,
1151
true);
1152
}
1153
1154
static int
1155
mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
1156
struct ieee80211_ampdu_params *params,
1157
bool enable)
1158
{
1159
struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
1160
struct mt7615_vif *mvif = msta->vif;
1161
struct wtbl_req_hdr *wtbl_hdr;
1162
struct tlv *sta_wtbl;
1163
struct sk_buff *skb;
1164
int err;
1165
1166
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
1167
&msta->wcid);
1168
if (IS_ERR(skb))
1169
return PTR_ERR(skb);
1170
1171
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false);
1172
1173
err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
1174
MCU_UNI_CMD(STA_REC_UPDATE), true);
1175
if (err < 0 || !enable)
1176
return err;
1177
1178
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
1179
&msta->wcid);
1180
if (IS_ERR(skb))
1181
return PTR_ERR(skb);
1182
1183
sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
1184
sizeof(struct tlv));
1185
1186
wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
1187
WTBL_SET, sta_wtbl, &skb);
1188
if (IS_ERR(wtbl_hdr))
1189
return PTR_ERR(wtbl_hdr);
1190
1191
mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false,
1192
sta_wtbl, wtbl_hdr);
1193
1194
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
1195
MCU_UNI_CMD(STA_REC_UPDATE), true);
1196
}
1197
1198
static int
1199
mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev,
1200
struct ieee80211_vif *vif,
1201
struct ieee80211_sta *sta)
1202
{
1203
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
1204
1205
return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
1206
vif, &msta->wcid,
1207
MCU_UNI_CMD(STA_REC_UPDATE));
1208
}
1209
1210
static const struct mt7615_mcu_ops uni_update_ops = {
1211
.add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
1212
.set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
1213
.add_dev_info = mt7615_mcu_uni_add_dev,
1214
.add_bss_info = mt7615_mcu_uni_add_bss,
1215
.add_tx_ba = mt7615_mcu_uni_tx_ba,
1216
.add_rx_ba = mt7615_mcu_uni_rx_ba,
1217
.sta_add = mt7615_mcu_uni_add_sta,
1218
.set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
1219
.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
1220
.set_sta_decap_offload = mt7615_mcu_sta_uni_update_hdr_trans,
1221
};
1222
1223
int mt7615_mcu_restart(struct mt76_dev *dev)
1224
{
1225
return mt76_mcu_send_msg(dev, MCU_CMD(RESTART_DL_REQ), NULL, 0, true);
1226
}
1227
EXPORT_SYMBOL_GPL(mt7615_mcu_restart);
1228
1229
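/*
 * Download a ROM patch to @addr. The patch semaphore serializes the download;
 * a PATCH_IS_DL result means the patch is already in place and the download
 * is skipped.
 */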
static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
1230
{
1231
const struct mt7615_patch_hdr *hdr;
1232
const struct firmware *fw = NULL;
1233
int len, ret, sem;
1234
1235
ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
1236
if (ret)
1237
return ret;
1238
1239
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
1240
dev_err(dev->mt76.dev, "Invalid firmware\n");
1241
ret = -EINVAL;
1242
goto release_fw;
1243
}
1244
1245
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
1246
switch (sem) {
1247
case PATCH_IS_DL:
1248
goto release_fw;
1249
case PATCH_NOT_DL_SEM_SUCCESS:
1250
break;
1251
default:
1252
dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
1253
ret = -EAGAIN;
1254
goto release_fw;
1255
}
1256
1257
hdr = (const struct mt7615_patch_hdr *)(fw->data);
1258
1259
dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
1260
be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
1261
1262
len = fw->size - sizeof(*hdr);
1263
1264
ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
1265
DL_MODE_NEED_RSP);
1266
if (ret) {
1267
dev_err(dev->mt76.dev, "Download request failed\n");
1268
goto out;
1269
}
1270
1271
ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
1272
fw->data + sizeof(*hdr), len);
1273
if (ret) {
1274
dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
1275
goto out;
1276
}
1277
1278
ret = mt76_connac_mcu_start_patch(&dev->mt76);
1279
if (ret)
1280
dev_err(dev->mt76.dev, "Failed to start patch\n");
1281
1282
out:
1283
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
1284
switch (sem) {
1285
case PATCH_REL_SEM_SUCCESS:
1286
break;
1287
default:
1288
ret = -EAGAIN;
1289
dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
1290
break;
1291
}
1292
1293
release_fw:
1294
release_firmware(fw);
1295
1296
return ret;
1297
}
1298
1299
static int
1300
mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
1301
const struct mt7615_fw_trailer *hdr,
1302
const u8 *data, bool is_cr4)
1303
{
1304
int n_region = is_cr4 ? CR4_REGION_NUM : N9_REGION_NUM;
1305
int err, i, offset = 0;
1306
u32 len, addr, mode;
1307
1308
for (i = 0; i < n_region; i++) {
1309
mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
1310
hdr[i].feature_set, is_cr4);
1311
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
1312
addr = le32_to_cpu(hdr[i].addr);
1313
1314
err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
1315
mode);
1316
if (err) {
1317
dev_err(dev->mt76.dev, "Download request failed\n");
1318
return err;
1319
}
1320
1321
err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
1322
data + offset, len);
1323
if (err) {
1324
dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
1325
return err;
1326
}
1327
1328
offset += len;
1329
}
1330
1331
return 0;
1332
}
1333
1334
static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
1335
{
1336
const struct mt7615_fw_trailer *hdr;
1337
const struct firmware *fw;
1338
int ret;
1339
1340
ret = request_firmware(&fw, name, dev->mt76.dev);
1341
if (ret)
1342
return ret;
1343
1344
if (!fw || !fw->data || fw->size < N9_REGION_NUM * sizeof(*hdr)) {
1345
dev_err(dev->mt76.dev, "Invalid firmware\n");
1346
ret = -EINVAL;
1347
goto out;
1348
}
1349
1350
hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
1351
N9_REGION_NUM * sizeof(*hdr));
1352
1353
dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
1354
hdr->fw_ver, hdr->build_date);
1355
1356
ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, false);
1357
if (ret)
1358
goto out;
1359
1360
ret = mt76_connac_mcu_start_firmware(&dev->mt76,
1361
le32_to_cpu(hdr->addr),
1362
FW_START_OVERRIDE);
1363
if (ret) {
1364
dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
1365
goto out;
1366
}
1367
1368
snprintf(dev->mt76.hw->wiphy->fw_version,
1369
sizeof(dev->mt76.hw->wiphy->fw_version),
1370
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
1371
1372
if (!is_mt7615(&dev->mt76)) {
1373
dev->fw_ver = MT7615_FIRMWARE_V2;
1374
dev->mcu_ops = &sta_update_ops;
1375
} else {
1376
dev->fw_ver = MT7615_FIRMWARE_V1;
1377
dev->mcu_ops = &wtbl_update_ops;
1378
}
1379
1380
out:
1381
release_firmware(fw);
1382
return ret;
1383
}
1384
1385
static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name)
1386
{
1387
const struct mt7615_fw_trailer *hdr;
1388
const struct firmware *fw;
1389
int ret;
1390
1391
ret = request_firmware(&fw, name, dev->mt76.dev);
1392
if (ret)
1393
return ret;
1394
1395
if (!fw || !fw->data || fw->size < CR4_REGION_NUM * sizeof(*hdr)) {
1396
dev_err(dev->mt76.dev, "Invalid firmware\n");
1397
ret = -EINVAL;
1398
goto out;
1399
}
1400
1401
hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
1402
CR4_REGION_NUM * sizeof(*hdr));
1403
1404
dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n",
1405
hdr->fw_ver, hdr->build_date);
1406
1407
ret = mt7615_mcu_send_ram_firmware(dev, hdr, fw->data, true);
1408
if (ret)
1409
goto out;
1410
1411
ret = mt76_connac_mcu_start_firmware(&dev->mt76, 0,
1412
FW_START_WORKING_PDA_CR4);
1413
if (ret) {
1414
dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
1415
goto out;
1416
}
1417
1418
out:
1419
release_firmware(fw);
1420
1421
return ret;
1422
}
1423
1424
static int mt7615_load_ram(struct mt7615_dev *dev)
1425
{
1426
int ret;
1427
1428
ret = mt7615_load_n9(dev, MT7615_FIRMWARE_N9);
1429
if (ret)
1430
return ret;
1431
1432
return mt7615_load_cr4(dev, MT7615_FIRMWARE_CR4);
1433
}
1434
1435
static int mt7615_load_firmware(struct mt7615_dev *dev)
1436
{
1437
int ret;
1438
u32 val;
1439
1440
val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE);
1441
1442
if (val != FW_STATE_FW_DOWNLOAD) {
1443
dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
1444
return -EIO;
1445
}
1446
1447
ret = mt7615_load_patch(dev, MT7615_PATCH_ADDRESS, MT7615_ROM_PATCH);
1448
if (ret)
1449
return ret;
1450
1451
ret = mt7615_load_ram(dev);
1452
if (ret)
1453
return ret;
1454
1455
if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
1456
FIELD_PREP(MT_TOP_MISC2_FW_STATE,
1457
FW_STATE_RDY), 500)) {
1458
dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
1459
return -EIO;
1460
}
1461
1462
return 0;
1463
}
1464
1465
static int mt7622_load_firmware(struct mt7615_dev *dev)
1466
{
1467
int ret;
1468
u32 val;
1469
1470
mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
1471
1472
val = mt76_get_field(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE);
1473
if (val != FW_STATE_FW_DOWNLOAD) {
1474
dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
1475
return -EIO;
1476
}
1477
1478
ret = mt7615_load_patch(dev, MT7622_PATCH_ADDRESS, MT7622_ROM_PATCH);
1479
if (ret)
1480
return ret;
1481
1482
ret = mt7615_load_n9(dev, MT7622_FIRMWARE_N9);
1483
if (ret)
1484
return ret;
1485
1486
if (!mt76_poll_msec(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE,
1487
FIELD_PREP(MT_TOP_OFF_RSV_FW_STATE,
1488
FW_STATE_NORMAL_TRX), 1500)) {
1489
dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
1490
return -EIO;
1491
}
1492
1493
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
1494
1495
return 0;
1496
}
1497
1498
int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
1499
{
1500
struct {
1501
u8 ctrl_val;
1502
u8 pad[3];
1503
} data = {
1504
.ctrl_val = ctrl
1505
};
1506
1507
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST),
1508
&data, sizeof(data), true);
1509
}
1510
1511
static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev)
1512
{
1513
struct {
1514
bool cache_enable;
1515
u8 pad[3];
1516
} data = {
1517
.cache_enable = true
1518
};
1519
1520
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(CAL_CACHE), &data,
1521
sizeof(data), false);
1522
}
1523
1524
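/*
 * Load the MT7663 (v3) N9 RAM firmware: walk the per-region descriptors in
 * the common tailer, download each region, then start the firmware at the
 * override address advertised by the region with DL_MODE_VALID_RAM_ENTRY set.
 */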
static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
1525
{
1526
u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL;
1527
const struct mt76_connac2_fw_trailer *hdr;
1528
const struct mt7663_fw_buf *buf;
1529
const struct firmware *fw;
1530
const u8 *base_addr;
1531
int i, ret;
1532
1533
ret = request_firmware(&fw, name, dev->mt76.dev);
1534
if (ret)
1535
return ret;
1536
1537
if (!fw || !fw->data || fw->size < FW_V3_COMMON_TAILER_SIZE) {
1538
dev_err(dev->mt76.dev, "Invalid firmware\n");
1539
ret = -EINVAL;
1540
goto out;
1541
}
1542
1543
hdr = (const void *)(fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE);
1544
dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
1545
hdr->fw_ver, hdr->build_date);
1546
dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region);
1547
1548
base_addr = fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE;
1549
for (i = 0; i < hdr->n_region; i++) {
1550
u32 shift = (hdr->n_region - i) * FW_V3_REGION_TAILER_SIZE;
1551
u32 len, addr, mode;
1552
1553
dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
1554
1555
buf = (const struct mt7663_fw_buf *)(base_addr - shift);
1556
mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
1557
buf->feature_set, false);
1558
addr = le32_to_cpu(buf->img_dest_addr);
1559
len = le32_to_cpu(buf->img_size);
1560
1561
ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
1562
mode);
1563
if (ret) {
1564
dev_err(dev->mt76.dev, "Download request failed\n");
1565
goto out;
1566
}
1567
1568
ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
1569
fw->data + offset, len);
1570
if (ret) {
1571
dev_err(dev->mt76.dev, "Failed to send firmware\n");
1572
goto out;
1573
}
1574
1575
offset += le32_to_cpu(buf->img_size);
1576
if (buf->feature_set & DL_MODE_VALID_RAM_ENTRY) {
1577
override_addr = le32_to_cpu(buf->img_dest_addr);
1578
dev_info(dev->mt76.dev, "Region %d, override_addr = 0x%08x\n",
1579
i, override_addr);
1580
}
1581
}
1582
1583
if (override_addr)
1584
flag |= FW_START_OVERRIDE;
1585
1586
dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
1587
override_addr, flag);
1588
1589
ret = mt76_connac_mcu_start_firmware(&dev->mt76, override_addr, flag);
1590
if (ret) {
1591
dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
1592
goto out;
1593
}
1594
1595
snprintf(dev->mt76.hw->wiphy->fw_version,
1596
sizeof(dev->mt76.hw->wiphy->fw_version),
1597
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
1598
1599
out:
1600
release_firmware(fw);
1601
1602
return ret;
1603
}
1604
1605
static int
1606
mt7663_load_rom_patch(struct mt7615_dev *dev, const char **n9_firmware)
1607
{
1608
const char *selected_rom, *secondary_rom = MT7663_ROM_PATCH;
1609
const char *primary_rom = MT7663_OFFLOAD_ROM_PATCH;
1610
int ret;
1611
1612
if (!prefer_offload_fw) {
1613
secondary_rom = MT7663_OFFLOAD_ROM_PATCH;
1614
primary_rom = MT7663_ROM_PATCH;
1615
}
1616
selected_rom = primary_rom;
1617
1618
ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, primary_rom);
1619
if (ret) {
1620
dev_info(dev->mt76.dev, "%s not found, switching to %s",
1621
primary_rom, secondary_rom);
1622
ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS,
1623
secondary_rom);
1624
if (ret) {
1625
dev_err(dev->mt76.dev, "failed to load %s",
1626
secondary_rom);
1627
return ret;
1628
}
1629
selected_rom = secondary_rom;
1630
}
1631
1632
if (!strcmp(selected_rom, MT7663_OFFLOAD_ROM_PATCH)) {
1633
*n9_firmware = MT7663_OFFLOAD_FIRMWARE_N9;
1634
dev->fw_ver = MT7615_FIRMWARE_V3;
1635
dev->mcu_ops = &uni_update_ops;
1636
} else {
1637
*n9_firmware = MT7663_FIRMWARE_N9;
1638
dev->fw_ver = MT7615_FIRMWARE_V2;
1639
dev->mcu_ops = &sta_update_ops;
1640
}
1641
1642
return 0;
1643
}
1644
1645
int __mt7663_load_firmware(struct mt7615_dev *dev)
1646
{
1647
const char *n9_firmware;
1648
int ret;
1649
1650
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
1651
if (ret) {
1652
dev_dbg(dev->mt76.dev, "Firmware is already download\n");
1653
return -EIO;
1654
}
1655
1656
ret = mt7663_load_rom_patch(dev, &n9_firmware);
1657
if (ret)
1658
return ret;
1659
1660
ret = mt7663_load_n9(dev, n9_firmware);
1661
if (ret)
1662
return ret;
1663
1664
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
1665
MT_TOP_MISC2_FW_N9_RDY, 1500)) {
1666
ret = mt76_get_field(dev, MT_CONN_ON_MISC,
1667
MT7663_TOP_MISC2_FW_STATE);
1668
dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
1669
return -EIO;
1670
}
1671
1672
#ifdef CONFIG_PM
1673
if (mt7615_firmware_offload(dev))
1674
dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
1675
#endif /* CONFIG_PM */
1676
1677
dev_dbg(dev->mt76.dev, "Firmware init done\n");
1678
1679
return 0;
1680
}
1681
EXPORT_SYMBOL_GPL(__mt7663_load_firmware);
1682
1683
static int mt7663_load_firmware(struct mt7615_dev *dev)
1684
{
1685
int ret;
1686
1687
mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
1688
1689
ret = __mt7663_load_firmware(dev);
1690
if (ret)
1691
return ret;
1692
1693
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
1694
1695
return 0;
1696
}
1697
1698
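/*
 * MCU bring-up: take driver ownership, load the chip-specific ROM patch and
 * RAM firmware, then apply the DBDC calibration cache and firmware log
 * settings.
 */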
int mt7615_mcu_init(struct mt7615_dev *dev)
1699
{
1700
static const struct mt76_mcu_ops mt7615_mcu_ops = {
1701
.headroom = sizeof(struct mt7615_mcu_txd),
1702
.mcu_skb_send_msg = mt7615_mcu_send_message,
1703
.mcu_parse_response = mt7615_mcu_parse_response,
1704
};
1705
int ret;
1706
1707
dev->mt76.mcu_ops = &mt7615_mcu_ops;
1708
1709
ret = mt7615_mcu_drv_pmctrl(dev);
1710
if (ret)
1711
return ret;
1712
1713
switch (mt76_chip(&dev->mt76)) {
1714
case 0x7622:
1715
ret = mt7622_load_firmware(dev);
1716
break;
1717
case 0x7663:
1718
ret = mt7663_load_firmware(dev);
1719
break;
1720
default:
1721
ret = mt7615_load_firmware(dev);
1722
break;
1723
}
1724
if (ret)
1725
return ret;
1726
1727
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
1728
dev_dbg(dev->mt76.dev, "Firmware init done\n");
1729
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
1730
1731
if (dev->dbdc_support) {
1732
ret = mt7615_mcu_cal_cache_apply(dev);
1733
if (ret)
1734
return ret;
1735
}
1736
1737
return mt7615_mcu_fw_log_2_host(dev, 0);
1738
}
1739
EXPORT_SYMBOL_GPL(mt7615_mcu_init);
1740
1741
void mt7615_mcu_exit(struct mt7615_dev *dev)
1742
{
1743
mt7615_mcu_restart(&dev->mt76);
1744
mt7615_mcu_set_fw_ctrl(dev);
1745
skb_queue_purge(&dev->mt76.mcu.res_q);
1746
}
1747
EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
1748
1749
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
1750
{
1751
struct {
1752
u8 buffer_mode;
1753
u8 content_format;
1754
__le16 len;
1755
} __packed req_hdr = {
1756
.buffer_mode = 1,
1757
};
1758
u8 *eep = (u8 *)dev->mt76.eeprom.data;
1759
struct sk_buff *skb;
1760
int eep_len, offset;
1761
1762
switch (mt76_chip(&dev->mt76)) {
1763
case 0x7622:
1764
eep_len = MT7622_EE_MAX - MT_EE_NIC_CONF_0;
1765
offset = MT_EE_NIC_CONF_0;
1766
break;
1767
case 0x7663:
1768
eep_len = MT7663_EE_MAX - MT_EE_CHIP_ID;
1769
req_hdr.content_format = 1;
1770
offset = MT_EE_CHIP_ID;
1771
break;
1772
default:
1773
eep_len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
1774
offset = MT_EE_NIC_CONF_0;
1775
break;
1776
}
1777
1778
req_hdr.len = cpu_to_le16(eep_len);
1779
1780
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + eep_len);
1781
if (!skb)
1782
return -ENOMEM;
1783
1784
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
1785
skb_put_data(skb, eep + offset, eep_len);
1786
1787
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
1788
MCU_EXT_CMD(EFUSE_BUFFER_MODE), true);
1789
}
1790
1791
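/*
 * Program the EDCA parameters for one hardware queue. cw_min/cw_max are
 * passed to the firmware as exponents (fls of the mac80211 values).
 */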
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
1792
const struct ieee80211_tx_queue_params *params)
1793
{
1794
#define WMM_AIFS_SET BIT(0)
1795
#define WMM_CW_MIN_SET BIT(1)
1796
#define WMM_CW_MAX_SET BIT(2)
1797
#define WMM_TXOP_SET BIT(3)
1798
#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
1799
WMM_CW_MAX_SET | WMM_TXOP_SET)
1800
struct req_data {
1801
u8 number;
1802
u8 rsv[3];
1803
u8 queue;
1804
u8 valid;
1805
u8 aifs;
1806
u8 cw_min;
1807
__le16 cw_max;
1808
__le16 txop;
1809
} __packed req = {
1810
.number = 1,
1811
.queue = queue,
1812
.valid = WMM_PARAM_SET,
1813
.aifs = params->aifs,
1814
.cw_min = 5,
1815
.cw_max = cpu_to_le16(10),
1816
.txop = cpu_to_le16(params->txop),
1817
};
1818
1819
if (params->cw_min)
1820
req.cw_min = fls(params->cw_min);
1821
if (params->cw_max)
1822
req.cw_max = cpu_to_le16(fls(params->cw_max));
1823
1824
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
1825
&req, sizeof(req), true);
1826
}
1827
1828
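/*
 * When a second phy exists, tell the firmware which BSS, MBSS, MU, BF, WMM
 * and management resources belong to band 1; otherwise just disable DBDC.
 */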
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
1829
{
1830
struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
1831
struct dbdc_entry {
1832
u8 type;
1833
u8 index;
1834
u8 band;
1835
u8 _rsv;
1836
};
1837
struct {
1838
u8 enable;
1839
u8 num;
1840
u8 _rsv[2];
1841
struct dbdc_entry entry[64];
1842
} req = {
1843
.enable = !!ext_phy,
1844
};
1845
int i;
1846
1847
if (!ext_phy)
1848
goto out;
1849
1850
#define ADD_DBDC_ENTRY(_type, _idx, _band) \
1851
do { \
1852
req.entry[req.num].type = _type; \
1853
req.entry[req.num].index = _idx; \
1854
req.entry[req.num++].band = _band; \
1855
} while (0)
1856
1857
for (i = 0; i < 4; i++) {
1858
bool band = !!(ext_phy->omac_mask & BIT_ULL(i));
1859
1860
ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band);
1861
}
1862
1863
for (i = 0; i < 14; i++) {
1864
bool band = !!(ext_phy->omac_mask & BIT_ULL(0x11 + i));
1865
1866
ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band);
1867
}
1868
1869
ADD_DBDC_ENTRY(DBDC_TYPE_MU, 0, 1);
1870
1871
for (i = 0; i < 3; i++)
1872
ADD_DBDC_ENTRY(DBDC_TYPE_BF, i, 1);
1873
1874
ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 0, 0);
1875
ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 1, 0);
1876
ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1);
1877
ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 3, 1);
1878
1879
ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 0, 0);
1880
ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
1881
1882
out:
1883
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DBDC_CTRL), &req,
1884
sizeof(req), true);
1885
}
1886
1887
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
1888
{
1889
struct {
1890
__le16 tag;
1891
__le16 min_lpn;
1892
} req = {
1893
.tag = cpu_to_le16(0x1),
1894
.min_lpn = cpu_to_le16(val),
1895
};
1896
1897
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH),
1898
&req, sizeof(req), true);
1899
}
1900
1901
int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
1902
const struct mt7615_dfs_pulse *pulse)
1903
{
1904
struct {
1905
__le16 tag;
1906
__le32 max_width; /* us */
1907
__le32 max_pwr; /* dbm */
1908
__le32 min_pwr; /* dbm */
1909
__le32 min_stgr_pri; /* us */
1910
__le32 max_stgr_pri; /* us */
1911
__le32 min_cr_pri; /* us */
1912
__le32 max_cr_pri; /* us */
1913
} req = {
1914
.tag = cpu_to_le16(0x3),
1915
#define __req_field(field) .field = cpu_to_le32(pulse->field)
1916
__req_field(max_width),
1917
__req_field(max_pwr),
1918
__req_field(min_pwr),
1919
__req_field(min_stgr_pri),
1920
__req_field(max_stgr_pri),
1921
__req_field(min_cr_pri),
1922
__req_field(max_cr_pri),
1923
#undef __req_field
1924
};
1925
1926
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH),
1927
&req, sizeof(req), true);
1928
}
1929
1930
int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
1931
const struct mt7615_dfs_pattern *pattern)
1932
{
1933
struct {
1934
__le16 tag;
1935
__le16 radar_type;
1936
u8 enb;
1937
u8 stgr;
1938
u8 min_crpn;
1939
u8 max_crpn;
1940
u8 min_crpr;
1941
u8 min_pw;
1942
u8 max_pw;
1943
__le32 min_pri;
1944
__le32 max_pri;
1945
u8 min_crbn;
1946
u8 max_crbn;
1947
u8 min_stgpn;
1948
u8 max_stgpn;
1949
u8 min_stgpr;
1950
} req = {
1951
.tag = cpu_to_le16(0x2),
1952
.radar_type = cpu_to_le16(index),
1953
#define __req_field_u8(field) .field = pattern->field
1954
#define __req_field_u32(field) .field = cpu_to_le32(pattern->field)
1955
__req_field_u8(enb),
1956
__req_field_u8(stgr),
1957
__req_field_u8(min_crpn),
1958
__req_field_u8(max_crpn),
1959
__req_field_u8(min_crpr),
1960
__req_field_u8(min_pw),
1961
__req_field_u8(max_pw),
1962
__req_field_u32(min_pri),
1963
__req_field_u32(max_pri),
1964
__req_field_u8(min_crbn),
1965
__req_field_u8(max_crbn),
1966
__req_field_u8(min_stgpn),
1967
__req_field_u8(max_stgpn),
1968
__req_field_u8(min_stgpr),
1969
#undef __req_field_u8
1970
#undef __req_field_u32
1971
};
1972
1973
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH),
1974
&req, sizeof(req), true);
1975
}
1976
1977
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
1978
{
1979
struct {
1980
u8 pulse_num;
1981
u8 rsv[3];
1982
struct {
1983
__le32 start_time;
1984
__le16 width;
1985
__le16 power;
1986
} pattern[32];
1987
} req = {
1988
.pulse_num = dev->radar_pattern.n_pulses,
1989
};
1990
u32 start_time = ktime_to_ms(ktime_get_boottime());
1991
int i;
1992
1993
if (dev->radar_pattern.n_pulses > ARRAY_SIZE(req.pattern))
1994
return -EINVAL;
1995
1996
/* TODO: add some noise here */
1997
for (i = 0; i < dev->radar_pattern.n_pulses; i++) {
1998
u32 ts = start_time + i * dev->radar_pattern.period;
1999
2000
req.pattern[i].width = cpu_to_le16(dev->radar_pattern.width);
2001
req.pattern[i].power = cpu_to_le16(dev->radar_pattern.power);
2002
req.pattern[i].start_time = cpu_to_le32(ts);
2003
}
2004
2005
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_PATTERN),
2006
&req, sizeof(req), false);
2007
}
2008
2009
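/*
 * Fill the per-rate SKU power table sent along with channel commands:
 * rate-power limits are looked up for the current channel and converted to
 * the firmware's rate ordering via sku_mapping; the last four entries carry
 * the per-stream deltas.
 */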
static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
{
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_hw *hw = mphy->hw;
	struct mt76_power_limits limits;
	s8 *limits_array = (s8 *)&limits;
	int n_chains = hweight8(mphy->antenna_mask);
	int tx_power = hw->conf.power_level * 2;
	int i;
	static const u8 sku_mapping[] = {
#define SKU_FIELD(_type, _field) \
	[MT_SKU_##_type] = offsetof(struct mt76_power_limits, _field)
		SKU_FIELD(CCK_1_2, cck[0]),
		SKU_FIELD(CCK_55_11, cck[2]),
		SKU_FIELD(OFDM_6_9, ofdm[0]),
		SKU_FIELD(OFDM_12_18, ofdm[2]),
		SKU_FIELD(OFDM_24_36, ofdm[4]),
		SKU_FIELD(OFDM_48, ofdm[6]),
		SKU_FIELD(OFDM_54, ofdm[7]),
		SKU_FIELD(HT20_0_8, mcs[0][0]),
		SKU_FIELD(HT20_32, ofdm[0]),
		SKU_FIELD(HT20_1_2_9_10, mcs[0][1]),
		SKU_FIELD(HT20_3_4_11_12, mcs[0][3]),
		SKU_FIELD(HT20_5_13, mcs[0][5]),
		SKU_FIELD(HT20_6_14, mcs[0][6]),
		SKU_FIELD(HT20_7_15, mcs[0][7]),
		SKU_FIELD(HT40_0_8, mcs[1][0]),
		SKU_FIELD(HT40_32, ofdm[0]),
		SKU_FIELD(HT40_1_2_9_10, mcs[1][1]),
		SKU_FIELD(HT40_3_4_11_12, mcs[1][3]),
		SKU_FIELD(HT40_5_13, mcs[1][5]),
		SKU_FIELD(HT40_6_14, mcs[1][6]),
		SKU_FIELD(HT40_7_15, mcs[1][7]),
		SKU_FIELD(VHT20_0, mcs[0][0]),
		SKU_FIELD(VHT20_1_2, mcs[0][1]),
		SKU_FIELD(VHT20_3_4, mcs[0][3]),
		SKU_FIELD(VHT20_5_6, mcs[0][5]),
		SKU_FIELD(VHT20_7, mcs[0][7]),
		SKU_FIELD(VHT20_8, mcs[0][8]),
		SKU_FIELD(VHT20_9, mcs[0][9]),
		SKU_FIELD(VHT40_0, mcs[1][0]),
		SKU_FIELD(VHT40_1_2, mcs[1][1]),
		SKU_FIELD(VHT40_3_4, mcs[1][3]),
		SKU_FIELD(VHT40_5_6, mcs[1][5]),
		SKU_FIELD(VHT40_7, mcs[1][7]),
		SKU_FIELD(VHT40_8, mcs[1][8]),
		SKU_FIELD(VHT40_9, mcs[1][9]),
		SKU_FIELD(VHT80_0, mcs[2][0]),
		SKU_FIELD(VHT80_1_2, mcs[2][1]),
		SKU_FIELD(VHT80_3_4, mcs[2][3]),
		SKU_FIELD(VHT80_5_6, mcs[2][5]),
		SKU_FIELD(VHT80_7, mcs[2][7]),
		SKU_FIELD(VHT80_8, mcs[2][8]),
		SKU_FIELD(VHT80_9, mcs[2][9]),
		SKU_FIELD(VHT160_0, mcs[3][0]),
		SKU_FIELD(VHT160_1_2, mcs[3][1]),
		SKU_FIELD(VHT160_3_4, mcs[3][3]),
		SKU_FIELD(VHT160_5_6, mcs[3][5]),
		SKU_FIELD(VHT160_7, mcs[3][7]),
		SKU_FIELD(VHT160_8, mcs[3][8]),
		SKU_FIELD(VHT160_9, mcs[3][9]),
#undef SKU_FIELD
	};

	tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
	tx_power -= mt76_tx_power_path_delta(n_chains);
	tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
					      &limits, tx_power);
	mphy->txpower_cur = tx_power;

	if (is_mt7663(mphy->dev)) {
		memset(sku, tx_power, MT_SKU_4SS_DELTA + 1);
		return;
	}

	for (i = 0; i < MT_SKU_1SS_DELTA; i++)
		sku[i] = limits_array[sku_mapping[i]];

	for (i = 0; i < 4; i++) {
		int delta = 0;

		if (i < n_chains - 1)
			delta = mt76_tx_power_path_delta(n_chains) -
				mt76_tx_power_path_delta(i + 1);
		sku[MT_SKU_1SS_DELTA + i] = delta;
	}
}

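/* Map a cfg80211 channel width onto the firmware CMD_CBW_* encoding. */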
static u8 mt7615_mcu_chan_bw(struct cfg80211_chan_def *chandef)
{
	static const u8 width_to_bw[] = {
		[NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
		[NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
		[NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
		[NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
		[NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
		[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
		[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
		[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
	};

	if (chandef->width >= ARRAY_SIZE(width_to_bw))
		return 0;

	return width_to_bw[chandef->width];
}

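/* Build and send the channel configuration request selected by @cmd
 * (e.g. channel switch or RX path setup). switch_reason distinguishes
 * normal switches, off-channel operation (which skips DPD calibration)
 * and DFS channels; the per-rate power table is filled by
 * mt7615_mcu_set_txpower_sku() unless testmode forces 0x3f, presumably
 * the maximum/unrestricted value.
 */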
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
{
	struct mt7615_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	struct {
		u8 control_chan;
		u8 center_chan;
		u8 bw;
		u8 tx_streams;
		u8 rx_streams_mask;
		u8 switch_reason;
		u8 band_idx;
		/* for 80+80 only */
		u8 center_chan2;
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 rsv1[3];
		u8 txpower_sku[53];
		u8 rsv2[3];
	} req = {
		.control_chan = chandef->chan->hw_value,
		.center_chan = ieee80211_frequency_to_channel(freq1),
		.tx_streams = hweight8(phy->mt76->antenna_mask),
		.rx_streams_mask = phy->mt76->chainmask,
		.center_chan2 = ieee80211_frequency_to_channel(freq2),
	};

	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
	    phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (phy->mt76->offchannel)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	req.band_idx = phy != &dev->phy;
	req.bw = mt7615_mcu_chan_bw(chandef);

	if (mt76_testmode_enabled(phy->mt76))
		memset(req.txpower_sku, 0x3f, 49);
	else
		mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}

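/* Query the chip temperature through the THERMAL_CTRL command; the value
 * is extracted from the MCU response by the response parser.
 */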
int mt7615_mcu_get_temperature(struct mt7615_dev *dev)
{
	struct {
		u8 action;
		u8 rsv[3];
	} req = {};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL),
				 &req, sizeof(req), true);
}

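/* Set a single testmode (ATE) parameter in the firmware. */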
int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
			      u32 val)
{
	struct {
		u8 test_mode_en;
		u8 param_idx;
		u8 _rsv[2];

		__le32 value;

		u8 pad[8];
	} req = {
		.test_mode_en = test_mode,
		.param_idx = param,
		.value = cpu_to_le32(val),
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL),
				 &req, sizeof(req), false);
}

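/* Enable or disable per-rate (SKU) tx power limit enforcement for the
 * band that @phy belongs to.
 */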
int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	struct {
		u8 format_id;
		u8 sku_enable;
		u8 band_idx;
		u8 rsv;
	} req = {
		.format_id = 0,
		.band_idx = phy != &dev->phy,
		.sku_enable = enable,
	};

	return mt76_mcu_send_msg(&dev->mt76,
				 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
				 &req, sizeof(req), true);
}

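/* Return the index of @cur in @freqs, or -1 if it is not in the table. */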
static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
{
	int i;

	for (i = 0; i < n_freqs; i++)
		if (cur == freqs[i])
			return i;

	return -1;
}

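/* Translate a center frequency / bandwidth pair into an index into the
 * pre-calibrated RX DCOC data: 5 GHz entries come from freq_list (with
 * sub-80 MHz widths first snapped onto the 40 MHz grid in freq_bw40),
 * while 2.4 GHz channels fall into four coarse buckets appended after
 * the 5 GHz table.
 */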
static int mt7615_dcoc_freq_idx(u16 freq, u8 bw)
{
	static const u16 freq_list[] = {
		4980, 5805, 5905, 5190,
		5230, 5270, 5310, 5350,
		5390, 5430, 5470, 5510,
		5550, 5590, 5630, 5670,
		5710, 5755, 5795, 5835,
		5875, 5210, 5290, 5370,
		5450, 5530, 5610, 5690,
		5775, 5855
	};
	static const u16 freq_bw40[] = {
		5190, 5230, 5270, 5310,
		5350, 5390, 5430, 5470,
		5510, 5550, 5590, 5630,
		5670, 5710, 5755, 5795,
		5835, 5875
	};
	int offset_2g = ARRAY_SIZE(freq_list);
	int idx;

	if (freq < 4000) {
		if (freq < 2427)
			return offset_2g;
		if (freq < 2442)
			return offset_2g + 1;
		if (freq < 2457)
			return offset_2g + 2;

		return offset_2g + 3;
	}

	switch (bw) {
	case NL80211_CHAN_WIDTH_80:
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		break;
	default:
		idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
					   freq + 10);
		if (idx >= 0) {
			freq = freq_bw40[idx];
			break;
		}

		idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
					   freq - 10);
		if (idx >= 0) {
			freq = freq_bw40[idx];
			break;
		}
		fallthrough;
	case NL80211_CHAN_WIDTH_40:
		idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
					   freq);
		if (idx >= 0)
			break;

		return -1;

	}

	return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
}

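/* Load the RX DCOC (DC offset cancellation) calibration data stored in
 * EEPROM/flash for the current channel into the firmware, falling back
 * to runtime calibration when no pre-calibrated entry matches. 80+80 and
 * 160 MHz channels are programmed as two 80 MHz segments, with the
 * second pass flagged through is_freq2.
 */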
int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq2 = chandef->center_freq2;
	int ret;
	struct {
		u8 direction;
		u8 runtime_calibration;
		u8 _rsv[2];

		__le16 center_freq;
		u8 bw;
		u8 band;
		u8 is_freq2;
		u8 success;
		u8 dbdc_en;

		u8 _rsv2;

		struct {
			__le32 sx0_i_lna[4];
			__le32 sx0_q_lna[4];

			__le32 sx2_i_lna[4];
			__le32 sx2_q_lna[4];
		} dcoc_data[4];
	} req = {
		.direction = 1,

		.bw = mt7615_mcu_chan_bw(chandef),
		.band = chandef->center_freq1 > 4000,
		.dbdc_en = !!dev->mt76.phys[MT_BAND1],
	};
	u16 center_freq = chandef->center_freq1;
	int freq_idx;
	u8 *eep = dev->mt76.eeprom.data;

	if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_RX_CAL))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160) {
		freq2 = center_freq + 40;
		center_freq -= 40;
	}

again:
	req.runtime_calibration = 1;
	freq_idx = mt7615_dcoc_freq_idx(center_freq, chandef->width);
	if (freq_idx < 0)
		goto out;

	memcpy(req.dcoc_data, eep + MT7615_EEPROM_DCOC_OFFSET +
			      freq_idx * MT7615_EEPROM_DCOC_SIZE,
	       sizeof(req.dcoc_data));
	req.runtime_calibration = 0;

out:
	req.center_freq = cpu_to_le16(center_freq);
	ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RXDCOC_CAL), &req,
				sizeof(req), true);

	if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
	     chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
		req.is_freq2 = true;
		center_freq = freq2;
		goto again;
	}

	return ret;
}

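/* Same style of lookup as mt7615_dcoc_freq_idx(), but for the TX DPD
 * tables: a 5 GHz frequency list plus three 2.4 GHz buckets, with
 * non-20 MHz widths also matched against the +/-10 MHz neighbours.
 */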
static int mt7615_dpd_freq_idx(u16 freq, u8 bw)
{
	static const u16 freq_list[] = {
		4920, 4940, 4960, 4980,
		5040, 5060, 5080, 5180,
		5200, 5220, 5240, 5260,
		5280, 5300, 5320, 5340,
		5360, 5380, 5400, 5420,
		5440, 5460, 5480, 5500,
		5520, 5540, 5560, 5580,
		5600, 5620, 5640, 5660,
		5680, 5700, 5720, 5745,
		5765, 5785, 5805, 5825,
		5845, 5865, 5885, 5905
	};
	int offset_2g = ARRAY_SIZE(freq_list);
	int idx;

	if (freq < 4000) {
		if (freq < 2432)
			return offset_2g;
		if (freq < 2457)
			return offset_2g + 1;

		return offset_2g + 2;
	}

	if (bw != NL80211_CHAN_WIDTH_20) {
		idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
					   freq + 10);
		if (idx >= 0)
			return idx;

		idx = mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
					   freq - 10);
		if (idx >= 0)
			return idx;
	}

	return mt7615_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
}

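/* Counterpart of mt7615_mcu_apply_rx_dcoc() for TX DPD (digital
 * pre-distortion): copy the per-frequency DPD coefficients from
 * EEPROM/flash when present, otherwise request runtime calibration.
 * Wide channels are again split into two 80 MHz passes.
 */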
int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq2 = chandef->center_freq2;
	int ret;
	struct {
		u8 direction;
		u8 runtime_calibration;
		u8 _rsv[2];

		__le16 center_freq;
		u8 bw;
		u8 band;
		u8 is_freq2;
		u8 success;
		u8 dbdc_en;

		u8 _rsv2;

		struct {
			struct {
				u32 dpd_g0;
				u8 data[32];
			} wf0, wf1;

			struct {
				u32 dpd_g0_prim;
				u32 dpd_g0_sec;
				u8 data_prim[32];
				u8 data_sec[32];
			} wf2, wf3;
		} dpd_data;
	} req = {
		.direction = 1,

		.bw = mt7615_mcu_chan_bw(chandef),
		.band = chandef->center_freq1 > 4000,
		.dbdc_en = !!dev->mt76.phys[MT_BAND1],
	};
	u16 center_freq = chandef->center_freq1;
	int freq_idx;
	u8 *eep = dev->mt76.eeprom.data;

	if (!(eep[MT_EE_CALDATA_FLASH] & MT_EE_CALDATA_FLASH_TX_DPD))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160) {
		freq2 = center_freq + 40;
		center_freq -= 40;
	}

again:
	req.runtime_calibration = 1;
	freq_idx = mt7615_dpd_freq_idx(center_freq, chandef->width);
	if (freq_idx < 0)
		goto out;

	memcpy(&req.dpd_data, eep + MT7615_EEPROM_TXDPD_OFFSET +
			      freq_idx * MT7615_EEPROM_TXDPD_SIZE,
	       sizeof(req.dpd_data));
	req.runtime_calibration = 0;

out:
	req.center_freq = cpu_to_le16(center_freq);
	ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXDPD_CAL),
				&req, sizeof(req), true);

	if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
	     chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
		req.is_freq2 = true;
		center_freq = freq2;
		goto again;
	}

	return ret;
}

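/* Add EAPOL (ETH_P_PAE) frames to the RX header translation blacklist,
 * presumably so the hardware leaves them untranslated (not converted to
 * 802.3 frames).
 */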
int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev)
{
	struct {
		u8 operation;
		u8 count;
		u8 _rsv[2];
		u8 index;
		u8 enable;
		__le16 etype;
	} req = {
		.operation = 1,
		.count = 1,
		.enable = 1,
		.etype = cpu_to_le16(ETH_P_PAE),
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
				 &req, sizeof(req), false);
}

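/* Refresh the firmware BSS connection state used for power-save handling
 * on station interfaces: SET_BSS_ABORT clears the previous state, and
 * SET_BSS_CONNECTED re-registers AID, DTIM period and beacon interval
 * when @enable is set.
 */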
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
			  bool enable)
{
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 dtim_period;
		__le16 aid;
		__le16 bcn_interval;
		__le16 atim_window;
		u8 uapsd;
		u8 bmc_delivered_ac;
		u8 bmc_triggered_ac;
		u8 pad;
	} req = {
		.bss_idx = mvif->mt76.idx,
		.aid = cpu_to_le16(vif->cfg.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
	};
	struct {
		u8 bss_idx;
		u8 pad[3];
	} req_hdr = {
		.bss_idx = mvif->mt76.idx,
	};
	int err;

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
				&req_hdr, sizeof(req_hdr), false);
	if (err < 0 || !enable)
		return err;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
				 &req, sizeof(req), false);
}

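/* Request a firmware-offloaded remain-on-channel period on @chan for
 * @duration, or cancel the pending request when @chan is NULL; the grant
 * is signalled later via an MCU event that sets phy->roc_grant.
 */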
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_channel *chan, int duration)
{
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_roc_tlv req = {
		.bss_idx = mvif->mt76.idx,
		.active = !chan,
		.max_interval = cpu_to_le32(duration),
		.primary_chan = chan ? chan->hw_value : 0,
		.band = chan ? chan->band : 0,
		.req_type = 2,
	};

	phy->roc_grant = false;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC),
				 &req, sizeof(req), false);
}
