GitHub Repository: torvalds/linux
Path: blob/master/drivers/net/wireless/realtek/rtw88/fw.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/iopoll.h>

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"
#include "wow.h"
#include "ps.h"
#include "phy.h"
#include "mac.h"

static const struct rtw_hw_reg_desc fw_h2c_regs[] = {
	{REG_FWIMR, MASKDWORD, "FWIMR"},
	{REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "FWIMR enable"},
	{REG_FWISR, MASKDWORD, "FWISR"},
	{REG_FWISR, BIT_FS_H2CCMD_INT, "FWISR enable"},
	{REG_HMETFR, BIT_INT_BOX_ALL, "BoxBitMap"},
	{REG_HMEBOX0, MASKDWORD, "MSG 0"},
	{REG_HMEBOX0_EX, MASKDWORD, "MSG_EX 0"},
	{REG_HMEBOX1, MASKDWORD, "MSG 1"},
	{REG_HMEBOX1_EX, MASKDWORD, "MSG_EX 1"},
	{REG_HMEBOX2, MASKDWORD, "MSG 2"},
	{REG_HMEBOX2_EX, MASKDWORD, "MSG_EX 2"},
	{REG_HMEBOX3, MASKDWORD, "MSG 3"},
	{REG_HMEBOX3_EX, MASKDWORD, "MSG_EX 3"},
	{REG_FT1IMR, MASKDWORD, "FT1IMR"},
	{REG_FT1IMR, BIT_FS_H2C_CMD_OK_INT_EN, "FT1IMR enable"},
	{REG_FT1ISR, MASKDWORD, "FT1ISR"},
	{REG_FT1ISR, BIT_FS_H2C_CMD_OK_INT, "FT1ISR enable "},
};

static const struct rtw_hw_reg_desc fw_c2h_regs[] = {
	{REG_FWIMR, MASKDWORD, "FWIMR"},
	{REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "CPWM"},
	{REG_FWIMR, BIT_FS_HRCV_INT_EN, "HRECV"},
	{REG_FWISR, MASKDWORD, "FWISR"},
	{REG_FWISR, BIT_FS_H2CCMD_INT, "CPWM"},
	{REG_FWISR, BIT_FS_HRCV_INT, "HRECV"},
	{REG_CPWM, MASKDWORD, "REG_CPWM"},
};

static const struct rtw_hw_reg_desc fw_core_regs[] = {
	{REG_ARFR2_V1, MASKDWORD, "EPC"},
	{REG_ARFRH2_V1, MASKDWORD, "BADADDR"},
	{REG_ARFR3_V1, MASKDWORD, "CAUSE"},
	{REG_ARFR3_V1, BIT_EXC_CODE, "ExcCode"},
	{REG_ARFRH3_V1, MASKDWORD, "Status"},
	{REG_ARFR4, MASKDWORD, "SP"},
	{REG_ARFRH4, MASKDWORD, "RA"},
	{REG_FW_DBG6, MASKDWORD, "DBG 6"},
	{REG_FW_DBG7, MASKDWORD, "DBG 7"},
};

static void _rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev,
				  const struct rtw_hw_reg_desc regs[], u32 size)
{
	const struct rtw_hw_reg_desc *reg;
	u32 val;
	int i;

	for (i = 0; i < size; i++) {
		reg = &regs[i];
		val = rtw_read32_mask(rtwdev, reg->addr, reg->mask);

		rtw_dbg(rtwdev, RTW_DBG_FW, "[%s]addr:0x%x mask:0x%x value:0x%x\n",
			reg->desc, reg->addr, reg->mask, val);
	}
}

void rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev)
{
	int i;

	if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_FW))
		return;

	_rtw_fw_dump_dbg_info(rtwdev, fw_h2c_regs, ARRAY_SIZE(fw_h2c_regs));
	_rtw_fw_dump_dbg_info(rtwdev, fw_c2h_regs, ARRAY_SIZE(fw_c2h_regs));
	for (i = 0 ; i < RTW_DEBUG_DUMP_TIMES; i++) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "Firmware Coredump %dth\n", i + 1);
		_rtw_fw_dump_dbg_info(rtwdev, fw_core_regs, ARRAY_SIZE(fw_core_regs));
	}
}

static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 sub_cmd_id;

	c2h = get_c2h_from_skb(skb);
	sub_cmd_id = c2h->payload[0];

	switch (sub_cmd_id) {
	case C2H_CCX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
		break;
	case C2H_SCAN_STATUS_RPT:
		rtw_hw_scan_status_report(rtwdev, skb);
		break;
	case C2H_CHAN_SWITCH:
		rtw_hw_scan_chan_switch(rtwdev, skb);
		break;
	default:
		break;
	}
}
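
/* Map the current TX bit rate to a max A-MSDU length hint for mac80211.
 * The rate comes from cfg80211_calculate_bitrate() and is in units of
 * 100 kbps, so the 550 below is 55 Mbps. Returning 1 effectively
 * disables aggregation; 0 means no limit.
 */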
static u16 get_max_amsdu_len(u32 bit_rate)
{
	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* lower than 20M 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	/* unlimited */
	return 0;
}

struct rtw_fw_iter_ra_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
	u8 length;
};

static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_fw_iter_ra_data *ra_data = data;
	struct rtw_c2h_ra_rpt *ra_rpt = (struct rtw_c2h_ra_rpt *)ra_data->payload;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 mac_id, rate, sgi, bw;
	u8 mcs, nss;
	u32 bit_rate;

	mac_id = ra_rpt->mac_id;
	if (si->mac_id != mac_id)
		return;

	si->ra_report.txrate.flags = 0;

	rate = u8_get_bits(ra_rpt->rate_sgi, RTW_C2H_RA_RPT_RATE);
	sgi = u8_get_bits(ra_rpt->rate_sgi, RTW_C2H_RA_RPT_SGI);
	if (ra_data->length >= offsetofend(typeof(*ra_rpt), bw))
		bw = ra_rpt->bw;
	else
		bw = si->bw_mode;

	if (rate < DESC_RATEMCS0) {
		si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
		goto legacy;
	}

	rtw_desc_to_mcsrate(rate, &mcs, &nss);
	if (rate >= DESC_RATEVHT1SS_MCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
	else if (rate >= DESC_RATEMCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

	if (rate >= DESC_RATEMCS0) {
		si->ra_report.txrate.mcs = mcs;
		si->ra_report.txrate.nss = nss;
	}

	if (sgi)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (bw == RTW_CHANNEL_WIDTH_80)
		si->ra_report.txrate.bw = RATE_INFO_BW_80;
	else if (bw == RTW_CHANNEL_WIDTH_40)
		si->ra_report.txrate.bw = RATE_INFO_BW_40;
	else
		si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
	bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

	si->ra_report.desc_rate = rate;
	si->ra_report.bit_rate = bit_rate;

	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}

static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
				    u8 length)
{
	struct rtw_c2h_ra_rpt *ra_rpt = (struct rtw_c2h_ra_rpt *)payload;
	struct rtw_fw_iter_ra_data ra_data;

	if (WARN(length < rtwdev->chip->c2h_ra_report_size,
		 "invalid ra report c2h length %d\n", length))
		return;

	rtwdev->dm_info.tx_rate = u8_get_bits(ra_rpt->rate_sgi,
					      RTW_C2H_RA_RPT_RATE);
	ra_data.rtwdev = rtwdev;
	ra_data.payload = payload;
	ra_data.length = length;
	rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}

struct rtw_beacon_filter_iter_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

static void rtw_fw_bcn_filter_notify_vif_iter(void *data,
					      struct ieee80211_vif *vif)
{
	struct rtw_beacon_filter_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	u8 *payload = iter_data->payload;
	u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
	u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
	s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);

	switch (type) {
	case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
		event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
		break;
	case BCN_FILTER_CONNECTION_LOSS:
		ieee80211_connection_loss(vif);
		break;
	case BCN_FILTER_CONNECTED:
		rtwdev->beacon_loss = false;
		break;
	case BCN_FILTER_NOTIFY_BEACON_LOSS:
		rtwdev->beacon_loss = true;
		rtw_leave_lps(rtwdev);
		break;
	}
}

static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_beacon_filter_iter_data dev_iter_data;

	dev_iter_data.rtwdev = rtwdev;
	dev_iter_data.payload = payload;
	rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
			 &dev_iter_data);
}

static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
			       u8 length)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->scan_density = payload[0];

	rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
		dm_info->scan_density);
}

static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	const struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
	struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
		"Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
		result->density, result->igi, result->l2h_th_init, result->l2h,
		result->h2l, result->option);

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
		rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
		"Set" : "Unset");
}

void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u32 pkt_offset;
	u8 len;

	pkt_offset = *((u32 *)skb->cb);
	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		goto unlock;

	switch (c2h->id) {
	case C2H_CCX_TX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	case C2H_BT_INFO:
		rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BT_HID_INFO:
		rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_WLAN_INFO:
		rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BCN_FILTER_NOTIFY:
		rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_HALMAC:
		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
		break;
	case C2H_RA_RPT:
		rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
		break;
	case C2H_ADAPTIVITY:
		rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
		break;
	default:
		rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
		break;
	}

unlock:
	mutex_unlock(&rtwdev->mutex);
}
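
/* C2H events arrive in IRQ context. The handler below deals with the few
 * time-critical events immediately; everything else keeps its packet
 * offset in skb->cb and is queued to c2h_work, where
 * rtw_fw_c2h_cmd_handle() runs under rtwdev->mutex.
 */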
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
			       struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 len;

	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;
	*((u32 *)skb->cb) = pkt_offset;

	rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
		c2h->id, c2h->seq, len);

	switch (c2h->id) {
	case C2H_BT_MP_INFO:
		rtw_coex_info_response(rtwdev, skb);
		break;
	case C2H_WLAN_RFON:
		complete(&rtwdev->lps_leave_check);
		dev_kfree_skb_any(skb);
		break;
	case C2H_SCAN_RESULT:
		complete(&rtwdev->fw_scan_density);
		rtw_fw_scan_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	default:
		/* pass offset for further operation */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&rtwdev->c2h_queue, skb);
		ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		break;
	}
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);

void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
{
	if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
		rtw_fw_recovery(rtwdev);
	else
		rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
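
/* H2C commands are posted through four HMEBOX mailbox register pairs,
 * used round-robin via h2c.last_box_num. REG_HMETFR holds one busy bit
 * per box, polled here so a command is never written over one the
 * firmware has not consumed yet. Both send paths write the extension
 * word first and the main box register last, which appears to be what
 * hands the command to the firmware.
 */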
static void rtw_fw_send_h2c_command_register(struct rtw_dev *rtwdev,
					     struct rtw_h2c_register *h2c)
{
	u32 box_reg, box_ex_reg;
	u8 box_state, box;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW, "send H2C content %08x %08x\n", h2c->w0,
		h2c->w1);

	lockdep_assert_held(&rtwdev->mutex);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		return;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);

	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		rtw_fw_dump_dbg_info(rtwdev);
		return;
	}

	rtw_write32(rtwdev, box_ex_reg, h2c->w1);
	rtw_write32(rtwdev, box_reg, h2c->w0);

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;
}

static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
				    u8 *h2c)
{
	struct rtw_h2c_cmd *h2c_cmd = (struct rtw_h2c_cmd *)h2c;
	u8 box;
	u8 box_state;
	u32 box_reg, box_ex_reg;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW,
		"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
		h2c[3], h2c[2], h2c[1], h2c[0],
		h2c[7], h2c[6], h2c[5], h2c[4]);

	lockdep_assert_held(&rtwdev->mutex);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		return;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);

	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		return;
	}

	rtw_write32(rtwdev, box_ex_reg, le32_to_cpu(h2c_cmd->msg_ext));
	rtw_write32(rtwdev, box_reg, le32_to_cpu(h2c_cmd->msg));

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;
}

void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
{
	rtw_fw_send_h2c_command(rtwdev, h2c);
}
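
/* Larger firmware-offload "H2C packets" do not fit in the mailbox
 * registers; they go out through the HCI H2C data path instead, stamped
 * with a rolling sequence number (rtwdev->h2c.seq).
 */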
503
504
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
505
{
506
int ret;
507
508
lockdep_assert_held(&rtwdev->mutex);
509
510
FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
511
ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
512
if (ret)
513
rtw_err(rtwdev, "failed to send h2c packet\n");
514
rtwdev->h2c.seq++;
515
}
516
517
void
518
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
519
{
520
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
521
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
522
u16 total_size = H2C_PKT_HDR_SIZE + 4;
523
524
if (rtw_chip_wcpu_8051(rtwdev))
525
return;
526
527
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
528
529
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
530
531
GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
532
fifo->rsvd_fw_txbuf_addr -
533
fifo->rsvd_boundary);
534
535
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
536
}
537
538
void
539
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
540
{
541
struct rtw_hal *hal = &rtwdev->hal;
542
struct rtw_efuse *efuse = &rtwdev->efuse;
543
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
544
u16 total_size = H2C_PKT_HDR_SIZE + 8;
545
u8 fw_rf_type = 0;
546
547
if (rtw_chip_wcpu_8051(rtwdev))
548
return;
549
550
if (hal->rf_type == RF_1T1R)
551
fw_rf_type = FW_RF_1T1R;
552
else if (hal->rf_type == RF_2T2R)
553
fw_rf_type = FW_RF_2T2R;
554
555
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);
556
557
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
558
PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
559
PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
560
PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
561
PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
562
PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);
563
564
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
565
}
566
567
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
568
{
569
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
570
u16 total_size = H2C_PKT_HDR_SIZE + 1;
571
572
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
573
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
574
IQK_SET_CLEAR(h2c_pkt, para->clear);
575
IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);
576
577
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
578
}
579
EXPORT_SYMBOL(rtw_fw_do_iqk);
580
581
void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
582
{
583
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
584
585
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);
586
587
RFK_SET_INFORM_START(h2c_pkt, start);
588
589
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
590
}
591
EXPORT_SYMBOL(rtw_fw_inform_rfk_status);
592
593
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
594
{
595
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
596
597
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);
598
599
SET_QUERY_BT_INFO(h2c_pkt, true);
600
601
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
602
}
603
604
void rtw_fw_default_port(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
605
{
606
struct rtw_h2c_register h2c = {};
607
608
if (rtwvif->net_type != RTW_NET_MGD_LINKED)
609
return;
610
611
/* Leave LPS before default port H2C so FW timer is correct */
612
rtw_leave_lps(rtwdev);
613
614
h2c.w0 = u32_encode_bits(H2C_CMD_DEFAULT_PORT, RTW_H2C_W0_CMDID) |
615
u32_encode_bits(rtwvif->port, RTW_H2C_DEFAULT_PORT_W0_PORTID) |
616
u32_encode_bits(rtwvif->mac_id, RTW_H2C_DEFAULT_PORT_W0_MACID);
617
618
rtw_fw_send_h2c_command_register(rtwdev, &h2c);
619
}
620
621
void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
622
{
623
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
624
625
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);
626
627
SET_WL_CH_INFO_LINK(h2c_pkt, link);
628
SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
629
SET_WL_CH_INFO_BW(h2c_pkt, bw);
630
631
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
632
}
633
634
void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
635
struct rtw_coex_info_req *req)
636
{
637
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
638
639
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);
640
641
SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
642
SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
643
SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
644
SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
645
SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);
646
647
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
648
}
649
650
void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
651
{
652
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
653
u8 index = 0 - bt_pwr_dec_lvl;
654
655
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);
656
657
SET_BT_TX_POWER_INDEX(h2c_pkt, index);
658
659
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
660
}
661
662
void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
663
{
664
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
665
666
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);
667
668
SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);
669
670
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
671
}
672
673
void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
674
u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
675
{
676
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
677
678
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);
679
680
SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
681
SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
682
SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
683
SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
684
SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);
685
686
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
687
}
688
689
void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
690
{
691
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
692
693
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
694
695
SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
696
SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
697
698
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
699
}
700
701
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
702
{
703
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
704
705
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);
706
707
SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);
708
709
SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
710
SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
711
SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
712
SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
713
SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));
714
715
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
716
}
717
718
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
719
{
720
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
721
u8 rssi = ewma_rssi_read(&si->avg_rssi);
722
bool stbc_en = si->stbc_en ? true : false;
723
724
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);
725
726
SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
727
SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
728
SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);
729
730
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
731
}
732
733
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
734
bool reset_ra_mask)
735
{
736
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
737
bool disable_pt = true;
738
u32 mask_hi;
739
740
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
741
742
SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
743
SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
744
SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
745
SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
746
SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
747
SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
748
SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
749
SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
750
SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
751
SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
752
SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
753
SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
754
SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
755
756
si->init_ra_lv = 0;
757
758
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
759
760
if (rtwdev->chip->id != RTW_CHIP_TYPE_8814A)
761
return;
762
763
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO_HI);
764
765
mask_hi = si->ra_mask >> 32;
766
767
SET_RA_INFO_RA_MASK0(h2c_pkt, (mask_hi & 0xff));
768
SET_RA_INFO_RA_MASK1(h2c_pkt, (mask_hi & 0xff00) >> 8);
769
SET_RA_INFO_RA_MASK2(h2c_pkt, (mask_hi & 0xff0000) >> 16);
770
SET_RA_INFO_RA_MASK3(h2c_pkt, (mask_hi & 0xff000000) >> 24);
771
772
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
773
}
774
775
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
776
{
777
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
778
779
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
780
MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
781
MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);
782
783
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
784
}
785
786
void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
787
{
788
struct rtw_traffic_stats *stats = &rtwdev->stats;
789
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
790
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
791
792
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
793
SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
794
SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
795
SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
796
SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
797
SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
798
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
799
}
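
/* The beacon filter offload expects RSSI as an unsigned 0..100 value,
 * so the (negative, dBm) CQM threshold below is shifted by +100 and
 * clamped before being encoded into the H2C.
 */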
void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
				 struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
	static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
	struct rtw_sta_info *si =
		sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
	s32 thold = RTW_DEFAULT_CQM_THOLD;
	u32 hyst = RTW_DEFAULT_CQM_HYST;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
		return;

	if (bss_conf->cqm_rssi_thold)
		thold = bss_conf->cqm_rssi_thold;
	if (bss_conf->cqm_rssi_hyst)
		hyst = bss_conf->cqm_rssi_hyst;

	if (!connect) {
		SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
		SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
		rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

		return;
	}

	if (!si)
		return;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
	ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

	memset(h2c_pkt, 0, sizeof(h2c_pkt));
	thold = clamp_t(s32, thold + rssi_offset, rssi_min, rssi_max);
	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
	SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
	SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
					       BCN_FILTER_OFFLOAD_MODE_DEFAULT);
	SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, thold);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
	SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
	SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, hyst);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

	SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
	SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
	SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
	SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
	SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
	SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_keep_alive_para mode = {
		.adopt = true,
		.pkt_type = KEEP_ALIVE_NULL_PKT,
		.period = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
	SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
	SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
	SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
	SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_disconnect_para mode = {
		.adopt = true,
		.period = 30,
		.retry_count = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

	if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
		SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
		SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
		SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

	SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
	if (rtw_wow_mgd_linked(rtwdev)) {
		if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
			SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
			SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
			SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
		if (rtw_wow->pattern_cnt)
			SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
				     u8 pairwise_key_enc,
				     u8 group_key_enc)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);

	SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
	SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);

	SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);

	if (rtw_wow_no_link(rtwdev))
		SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
				     enum rtw_rsvd_packet_type type)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (type == rsvd_pkt->type)
			location = rsvd_pkt->page;
	}

	return location;
}

void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_nlo;

	loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);

	SET_NLO_FUN_EN(h2c_pkt, enable);
	if (enable) {
		if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
			SET_NLO_PS_32K(h2c_pkt, enable);
		SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
		SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_recover_bt_device(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RECOVER_BT_DEV);
	SET_RECOVER_BT_DEV_EN(h2c_pkt, 1);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_pg, loc_dpk;

	loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
	loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

	LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
	LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
	LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
	LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
					       struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    cfg80211_ssid_eq(rsvd_pkt->ssid, ssid))
			location = rsvd_pkt->page;
	}

	return location;
}

static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
					    struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u16 size = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    cfg80211_ssid_eq(rsvd_pkt->ssid, ssid))
			size = rsvd_pkt->probe_req_size;
	}

	return size;
}

void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 location = 0;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
	*(h2c_pkt + 1) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
	*(h2c_pkt + 2) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
	*(h2c_pkt + 3) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
	*(h2c_pkt + 4) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct rtw_nlo_info_hdr *nlo_hdr;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	u8 *pos, loc;
	u32 size;
	int i;

	if (!pno_req->inited || !pno_req->match_set_cnt)
		return NULL;

	size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
	       IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

	nlo_hdr->nlo_count = pno_req->match_set_cnt;
	nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

	/* pattern check for firmware */
	memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

	for (i = 0; i < pno_req->match_set_cnt; i++)
		nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		ssid = &pno_req->match_sets[i].ssid;
		loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
		if (!loc) {
			rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
			kfree_skb(skb);
			return NULL;
		}
		nlo_hdr->location[i] = loc;
	}

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
		memcpy(pos, pno_req->match_sets[i].ssid.ssid,
		       pno_req->match_sets[i].ssid.ssid_len);
	}

	return skb;
}

static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct ieee80211_channel *channels = pno_req->channels;
	struct sk_buff *skb;
	int count = pno_req->channel_cnt;
	u8 *pos;
	int i = 0;

	skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	for (i = 0; i < count; i++) {
		pos = skb_put_zero(skb, 4);

		CHSW_INFO_SET_CH(pos, channels[i].hw_value);

		if (channels[i].flags & IEEE80211_CHAN_RADAR)
			CHSW_INFO_SET_ACTION_ID(pos, 0);
		else
			CHSW_INFO_SET_ACTION_ID(pos, 1);
		CHSW_INFO_SET_TIMEOUT(pos, 1);
		CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
		CHSW_INFO_SET_BW(pos, 0);
	}

	return skb;
}

static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
	struct rtw_lps_pg_dpk_hdr *dpk_hdr;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
	dpk_hdr->dpk_ch = dpk_info->dpk_ch;
	dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
	memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
	memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
	memcpy(dpk_hdr->coef, dpk_info->coef, 160);

	return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	struct rtw_lps_pg_info_hdr *pg_info_hdr;
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
	pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
	pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
	pg_info_hdr->sec_cam_count =
		rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
	pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

	conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
	conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

	return skb;
}

static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
					     struct rtw_rsvd_page *rsvd_pkt)
{
	struct ieee80211_vif *vif;
	struct rtw_vif *rtwvif;
	struct sk_buff *skb_new;
	struct cfg80211_ssid *ssid;
	u16 tim_offset = 0;

	if (rsvd_pkt->type == RSVD_DUMMY) {
		skb_new = alloc_skb(1, GFP_KERNEL);
		if (!skb_new)
			return NULL;

		skb_put(skb_new, 1);
		return skb_new;
	}

	rtwvif = rsvd_pkt->rtwvif;
	if (!rtwvif)
		return NULL;

	vif = rtwvif_to_vif(rtwvif);

	switch (rsvd_pkt->type) {
	case RSVD_BEACON:
		skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
		rsvd_pkt->tim_offset = tim_offset;
		break;
	case RSVD_PS_POLL:
		skb_new = ieee80211_pspoll_get(hw, vif);
		break;
	case RSVD_PROBE_RESP:
		skb_new = ieee80211_proberesp_get(hw, vif);
		break;
	case RSVD_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
		break;
	case RSVD_QOS_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
		break;
	case RSVD_LPS_PG_DPK:
		skb_new = rtw_lps_pg_dpk_get(hw);
		break;
	case RSVD_LPS_PG_INFO:
		skb_new = rtw_lps_pg_info_get(hw);
		break;
	case RSVD_PROBE_REQ:
		ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
		if (ssid)
			skb_new = ieee80211_probereq_get(hw, vif->addr,
							 ssid->ssid,
							 ssid->ssid_len, 0);
		else
			skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
		if (skb_new)
			rsvd_pkt->probe_req_size = (u16)skb_new->len;
		break;
	case RSVD_NLO_INFO:
		skb_new = rtw_nlo_info_get(hw);
		break;
	case RSVD_CH_INFO:
		skb_new = rtw_cs_channel_info_get(hw);
		break;
	default:
		return NULL;
	}

	if (!skb_new)
		return NULL;

	return skb_new;
}

static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				    enum rtw_rsvd_packet_type type)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 *pkt_desc;

	rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(rtwdev, &pkt_info, skb);
}

static inline u8 rtw_len_to_page(unsigned int len, u16 page_size)
{
	return DIV_ROUND_UP(len, page_size);
}
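
/* Copy one reserved packet into the shared rsvd page buffer. The first
 * packet (the beacon) is stored without a TX descriptor, since the TX
 * path adds one when the buffer is downloaded, so every later packet
 * starts at page_margin = page_size - tx_pkt_desc_sz into its first
 * page. See the layout notes in rtw_build_rsvd_page().
 */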
static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u16 page_size,
				      u16 page_margin, u32 page, u8 *buf,
				      struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb = rsvd_pkt->skb;

	if (page >= 1)
		memcpy(buf + page_margin + page_size * (page - 1),
		       skb->data, skb->len);
	else
		memcpy(buf, skb->data, skb->len);
}

static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
						 enum rtw_rsvd_packet_type type,
						 bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt = NULL;

	rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);

	if (!rsvd_pkt)
		return NULL;

	INIT_LIST_HEAD(&rsvd_pkt->vif_list);
	INIT_LIST_HEAD(&rsvd_pkt->build_list);
	rsvd_pkt->type = type;
	rsvd_pkt->add_txdesc = txdesc;

	return rsvd_pkt;
}

static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
				 struct rtw_vif *rtwvif,
				 struct rtw_rsvd_page *rsvd_pkt)
{
	lockdep_assert_held(&rtwdev->mutex);

	list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}

static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
			      struct rtw_vif *rtwvif,
			      enum rtw_rsvd_packet_type type,
			      bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif,
					struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rsvd_pkt->ssid = ssid;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
			  struct rtw_vif *rtwvif)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	lockdep_assert_held(&rtwdev->mutex);

	/* remove all of the rsvd pages for vif */
	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
				 vif_list) {
		list_del(&rsvd_pkt->vif_list);
		if (!list_empty(&rsvd_pkt->build_list))
			list_del(&rsvd_pkt->build_list);
		kfree(rsvd_pkt);
	}
}

void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    vif->type != NL80211_IFTYPE_MESH_POINT) {
		rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
}

void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
	struct cfg80211_ssid *ssid;
	int i;

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
			 vif->type);
		return;
	}

	for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) {
		ssid = &rtw_pno_req->match_sets[i].ssid;
		rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
	}

	rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
}

void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
}
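
/* Reserved page data is written through the beacon queue, so the helper
 * below temporarily enables SW beacon download (BIT_ENSWBCN), stops the
 * beacon function and, on PCIe, beacon queue download, saving the
 * touched registers in bckp[] and restoring them once the write and the
 * BCN_VALID handshake are done.
 */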
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				u8 *buf, u32 size)
{
	u8 bckp[3];
	u8 val;
	u16 rsvd_pg_head;
	u32 bcn_valid_addr;
	u32 bcn_valid_mask;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	if (!size)
		return -EINVAL;

	bckp[2] = rtw_read8(rtwdev, REG_BCN_CTRL);

	if (rtw_chip_wcpu_8051(rtwdev)) {
		rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
	} else {
		pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
		pg_addr |= BIT_BCN_VALID_V1;
		rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
	}

	val = rtw_read8(rtwdev, REG_CR + 1);
	bckp[0] = val;
	val |= BIT_ENSWBCN >> 8;
	rtw_write8(rtwdev, REG_CR + 1, val);

	rtw_write8(rtwdev, REG_BCN_CTRL,
		   (bckp[2] & ~BIT_EN_BCN_FUNCTION) | BIT_DIS_TSF_UDT);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
		val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
		bckp[1] = val;
		val &= ~(BIT_EN_BCNQ_DL >> 16);
		rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
	}

	ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to write data to rsvd page\n");
		goto restore;
	}

	if (rtw_chip_wcpu_8051(rtwdev)) {
		bcn_valid_addr = REG_DWBCN0_CTRL;
		bcn_valid_mask = BIT_BCN_VALID;
	} else {
		bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
		bcn_valid_mask = BIT_BCN_VALID_V1;
	}

	if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
		rtw_err(rtwdev, "error beacon valid\n");
		ret = -EBUSY;
	}

restore:
	rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
		    rsvd_pg_head | BIT_BCN_VALID_V1);
	rtw_write8(rtwdev, REG_BCN_CTRL, bckp[2]);
	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
		rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
	rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

	return ret;
}

static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	u32 pg_size;
	u32 pg_num = 0;
	u16 pg_addr = 0;

	pg_size = rtwdev->chip->page_size;
	pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
	if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
		return -ENOMEM;

	pg_addr = rtwdev->fifo.rsvd_drv_addr;

	return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}

static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
				 build_list) {
		list_del_init(&rsvd_pkt->build_list);

		/* Don't free except for the dummy rsvd page,
		 * others will be freed when removing vif
		 */
		if (rsvd_pkt->type == RSVD_DUMMY)
			kfree(rsvd_pkt);
	}
}

static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct rtw_rsvd_page *rsvd_pkt;

	/* AP not yet started, don't gather its rsvd pages */
	if (vif->type == NL80211_IFTYPE_AP && !rtwdev->ap_active)
		return;

	list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
		if (rsvd_pkt->type == RSVD_BEACON)
			list_add(&rsvd_pkt->build_list,
				 &rtwdev->rsvd_page_list);
		else
			list_add_tail(&rsvd_pkt->build_list,
				      &rtwdev->rsvd_page_list);
	}
}

static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt;

	__rtw_build_rsvd_page_reset(rtwdev);

	/* gather rsvd page from vifs */
	rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		WARN(1, "Should not have an empty reserved page\n");
		return -EINVAL;
	}

	/* the first rsvd should be beacon, otherwise add a dummy one */
	if (rsvd_pkt->type != RSVD_BEACON) {
		struct rtw_rsvd_page *dummy_pkt;

		dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
		if (!dummy_pkt) {
			rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
			return -ENOMEM;
		}

		list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
	}

	return 0;
}

static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_rsvd_page *rsvd_pkt;
	struct sk_buff *iter;
	u16 page_size, page_margin, tx_desc_sz;
	u8 total_page = 0;
	u32 page = 0;
	u8 *buf;
	int ret;

	page_size = chip->page_size;
	tx_desc_sz = chip->tx_pkt_desc_sz;
	page_margin = page_size - tx_desc_sz;

	ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
	if (ret) {
		rtw_err(rtwdev,
			"failed to build rsvd page from vifs, ret %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
		if (!iter) {
			rtw_err(rtwdev, "failed to build rsvd packet\n");
			goto release_skb;
		}

		/* Fill the tx_desc for the rsvd pkt that requires one.
		 * And iter->len will be added with size of tx_desc_sz.
		 */
		if (rsvd_pkt->add_txdesc)
			rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);

		rsvd_pkt->skb = iter;
		rsvd_pkt->page = total_page;

		/* Reserved page is downloaded via TX path, and TX path will
		 * generate a tx_desc at the header to describe length of
		 * the buffer. If we are not counting page numbers with the
		 * size of tx_desc added at the first rsvd_pkt (usually a
		 * beacon, firmware default refer to the first page as the
		 * content of beacon), we could generate a buffer which size
		 * is smaller than the actual size of the whole rsvd_page
		 */
		if (total_page == 0) {
			if (rsvd_pkt->type != RSVD_BEACON &&
			    rsvd_pkt->type != RSVD_DUMMY) {
				rtw_err(rtwdev, "first page should be a beacon\n");
				goto release_skb;
			}
			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
						      page_size);
		} else {
			total_page += rtw_len_to_page(iter->len, page_size);
		}
	}

	if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
		rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
		goto release_skb;
	}

	*size = (total_page - 1) * page_size + page_margin;
	buf = kzalloc(*size, GFP_KERNEL);
	if (!buf)
		goto release_skb;

	/* Copy the content of each rsvd_pkt to the buf, and they should
	 * be aligned to the pages.
	 *
	 * Note that the first rsvd_pkt is a beacon no matter what vif->type.
	 * And that rsvd_pkt does not require tx_desc because when it goes
	 * through TX path, the TX path will generate one for it.
	 */
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
					  page, buf, rsvd_pkt);
		if (page == 0)
			page += rtw_len_to_page(rsvd_pkt->skb->len +
						tx_desc_sz, page_size);
		else
			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return buf;

release_skb:
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return NULL;
}

static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_rsvd_page *rsvd_pkt;
	struct sk_buff *skb;
	int ret = 0;

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to get rsvd page from build list\n");
		return -ENOENT;
	}

	if (rsvd_pkt->type != RSVD_BEACON &&
	    rsvd_pkt->type != RSVD_DUMMY) {
		rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
			rsvd_pkt->type);
		return -EINVAL;
	}

	skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
	if (!skb) {
		rtw_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
	if (ret)
		rtw_err(rtwdev, "failed to download drv rsvd page\n");

	dev_kfree_skb(skb);

	return ret;
}

int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
	u8 *buf;
	u32 size;
	int ret;

	buf = rtw_build_rsvd_page(rtwdev, &size);
	if (!buf) {
		rtw_err(rtwdev, "failed to build rsvd page pkt\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to download drv rsvd page\n");
		goto free;
	}

	/* The last thing is to download the *ONLY* beacon again, because
	 * the previous tx_desc is to describe the total rsvd page. Download
	 * the beacon again to replace the TX desc header, and we will get
	 * a correct tx_desc for the beacon in the rsvd page.
	 */
	ret = rtw_download_beacon(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to download beacon\n");
		goto free;
	}

free:
	kfree(buf);

	return ret;
}

void rtw_fw_update_beacon_work(struct work_struct *work)
{
	struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
					      update_beacon_work);

	mutex_lock(&rtwdev->mutex);
	rtw_fw_download_rsvd_page(rtwdev);
	rtw_send_rsvd_page_h2c(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}
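
/* The packet buffer is read through a debug window rather than mapped
 * directly: REG_PKTBUF_DBG_CTRL selects which page shows up at
 * FIFO_DUMP_ADDR, FIFO_PAGE_SIZE bytes at a time, with RX clock gating
 * disabled around the reads.
 */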
1804
1805
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
1806
u32 *buf, u32 residue, u16 start_pg)
1807
{
1808
u32 i;
1809
u16 idx = 0;
1810
u16 ctl;
1811
1812
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
1813
/* disable rx clock gate */
1814
rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);
1815
1816
do {
1817
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
1818
1819
for (i = FIFO_DUMP_ADDR + residue;
1820
i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
1821
buf[idx++] = rtw_read32(rtwdev, i);
1822
size -= 4;
1823
if (size == 0)
1824
goto out;
1825
}
1826
1827
residue = 0;
1828
start_pg++;
1829
} while (size);
1830
1831
out:
1832
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
1833
/* restore rx clock gate */
1834
rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
1835
}
1836
1837
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
1838
u32 offset, u32 size, u32 *buf)
1839
{
1840
const struct rtw_chip_info *chip = rtwdev->chip;
1841
u32 start_pg, residue;
1842
1843
if (sel >= RTW_FW_FIFO_MAX) {
1844
rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
1845
return;
1846
}
1847
if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
1848
offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
1849
residue = offset & (FIFO_PAGE_SIZE - 1);
1850
start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];
1851
1852
rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
1853
}
1854
1855
static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
1856
enum rtw_fw_fifo_sel sel,
1857
u32 start_addr, u32 size)
1858
{
1859
switch (sel) {
1860
case RTW_FW_FIFO_SEL_TX:
1861
case RTW_FW_FIFO_SEL_RX:
1862
if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
1863
return false;
1864
fallthrough;
1865
default:
1866
return true;
1867
}
1868
}
1869
1870
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
1871
u32 *buffer)
1872
{
1873
if (!rtwdev->chip->fw_fifo_addr[0]) {
1874
rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
1875
return -ENOTSUPP;
1876
}
1877
1878
if (size == 0 || !buffer)
1879
return -EINVAL;
1880
1881
if (size & 0x3) {
1882
rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n");
1883
return -EINVAL;
1884
}
1885
1886
if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
1887
rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
1888
return -EINVAL;
1889
}
1890
1891
rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);
1892
1893
return 0;
1894
}
1895
1896
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	size += chip->tx_pkt_desc_sz;
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	u8 loc;
	u16 size;

	loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
	if (!loc) {
		rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
		return;
	}

	size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
	if (!size) {
		rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
		return;
	}

	__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
}

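/* Program the firmware channel-switch engine used for net-detect (PNO)
 * while in WoWLAN. The cs_option values below are fixed driver policy
 * (periodic switching with a slow cycle); the channel list itself lives
 * in the RSVD_CH_INFO reserved page and only its location is passed on.
 */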
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_edcca_enabled) {
		dm_info->edcca_mode = RTW_EDCCA_NORMAL;
		rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
			"EDCCA disabled by debugfs\n");
	}

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
	SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
	SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
	SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
	SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
	SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

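/* Tell the firmware that a scan starts (@start = true) or completes;
 * a single H2C_CMD_SCAN command carrying only the start/stop flag.
 */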
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
	SET_SCAN_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

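/* For every band the chip supports, queue a copy of the bare probe
 * request @skb with the band-specific and common scan IEs appended;
 * @bands is incremented once per queued copy so the caller can size
 * the reserved-page buffer.
 */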
static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
				   struct sk_buff_head *list, u8 *bands,
				   struct rtw_vif *rtwvif)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct sk_buff *new;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(BIT(idx) & chip->band))
			continue;
		new = skb_copy(skb, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		skb_put_data(new, ies->ies[idx], ies->len[idx]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);
		skb_queue_tail(list, new);
		(*bands)++;
	}

	return 0;
}

static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
					 struct sk_buff_head *probe_req_list)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb, *tmp;
	u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
	u8 tx_desc_sz = chip->tx_pkt_desc_sz;
	u16 page_size = chip->page_size;
	u8 page_offset = 1, *buf;
	u16 buf_offset = page_size * page_offset;
	unsigned int pkt_len;
	u8 page_cnt, pages;
	int ret;

	if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
		page_cnt = RTW_OLD_PROBE_PG_CNT;
	else
		page_cnt = RTW_PROBE_PG_CNT;

	pages = page_offset + num_probes * page_cnt;

	buf = kzalloc(page_size * pages, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf_offset -= tx_desc_sz;
	skb_queue_walk_safe(probe_req_list, skb, tmp) {
		skb_unlink(skb, probe_req_list);
		rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
		if (skb->len > page_size * page_cnt) {
			ret = -EINVAL;
			goto out;
		}

		memcpy(buf + buf_offset, skb->data, skb->len);
		pkt_len = skb->len - tx_desc_sz;
		loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
		__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);

		buf_offset += page_cnt * page_size;
		page_offset += page_cnt;
		kfree_skb(skb);
	}

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
	if (ret) {
		rtw_err(rtwdev, "Download probe request to firmware failed\n");
		goto out;
	}

	rtwdev->scan_info.probe_pg_size = page_offset;
out:
	kfree(buf);
	skb_queue_walk_safe(probe_req_list, skb, tmp)
		kfree_skb(skb);

	return ret;
}

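/* Build one probe request per SSID in the scan request, fan each out
 * across the supported bands, and hand the whole set to
 * _rtw_hw_scan_update_probe_req() for download to the reserved page.
 */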
static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff_head list;
	struct sk_buff *skb, *tmp;
	u8 num = req->n_ssids, i, bands = 0;
	int ret;

	skb_queue_head_init(&list);
	for (i = 0; i < num; i++) {
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
					      rtwvif);
		if (ret)
			goto out;

		kfree_skb(skb);
	}

	return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);

out:
	skb_queue_walk_safe(&list, skb, tmp)
		kfree_skb(skb);

	return ret;
}

static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
			     struct rtw_chan_list *list, u8 *buf)
{
	u8 *chan = &buf[list->size];
	u8 info_size = RTW_CH_INFO_SIZE;

	if (list->size > list->buf_size)
		return -ENOMEM;

	CH_INFO_SET_CH(chan, info->channel);
	CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx);
	CH_INFO_SET_BW(chan, info->bw);
	CH_INFO_SET_TIMEOUT(chan, info->timeout);
	CH_INFO_SET_ACTION_ID(chan, info->action_id);
	CH_INFO_SET_EXTRA_INFO(chan, info->extra_info);
	if (info->extra_info) {
		EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS);
		EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN);
		EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE -
				       RTW_EX_CH_INFO_HDR_SIZE);
		EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME);
		info_size += RTW_EX_CH_INFO_SIZE;
	}
	list->size += info_size;
	list->ch_num++;

	return 0;
}

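/* Serialize the scan request's channels into @buf with
 * rtw_add_chan_info(), marking radar/no-IR channels for a passive dwell
 * of at least RTW_PASS_CHAN_TIME, then download the blob to the
 * reserved page right after the probe request pages.
 */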
static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
			     struct rtw_chan_list *list, u8 *buf)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct ieee80211_channel *channel;
	int i, ret = 0;

	for (i = 0; i < req->n_channels; i++) {
		struct rtw_chan_info ch_info = {0};

		channel = req->channels[i];
		ch_info.channel = channel->hw_value;
		ch_info.bw = RTW_SCAN_WIDTH;
		ch_info.pri_ch_idx = RTW_PRI_CH_IDX;
		ch_info.timeout = req->duration_mandatory ?
				  req->duration : RTW_CHANNEL_TIME;

		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) {
			ch_info.action_id = RTW_CHANNEL_RADAR;
			ch_info.extra_info = 1;
			/* Overwrite duration for passive scans if necessary */
			ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ?
					  ch_info.timeout : RTW_PASS_CHAN_TIME;
		} else {
			ch_info.action_id = RTW_CHANNEL_ACTIVE;
		}

		ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf);
		if (ret)
			return ret;
	}

	if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) {
		rtw_err(rtwdev, "List exceeds rsvd page total size\n");
		return -EINVAL;
	}

	list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size;
	ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size);
	if (ret)
		rtw_err(rtwdev, "Download channel list failed\n");

	return ret;
}

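/* Fill and send the H2C_PKT_SCAN_OFFLOAD command: where the channel
 * list and probe requests sit in the reserved page (page offsets
 * relative to rsvd_boundary), plus the operating-channel parameters the
 * firmware needs in order to return for back-op traffic.
 */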
static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev,
				    struct rtw_ch_switch_option *opt,
				    struct rtw_vif *rtwvif,
				    struct rtw_chan_list *list)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	/* reserve one dummy page at the beginning for tx descriptor */
	u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN);

	SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en);
	SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en);
	SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq);
	SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck);
	SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num);
	SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size);
	SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary);
	SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan);
	SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx);
	SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw);
	SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port);
	SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ?
				       req->duration : RTW_CHANNEL_TIME);
	SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME);
	SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids);
	SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

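/* Called from the mac80211 hw_scan path: stash the scan request,
 * quiesce the TX path, pick the (possibly randomized) source MAC
 * address, and clear BIT_CBSSID_BCN so beacons are not filtered by
 * BSSID while scanning.
 */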
void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
		       struct ieee80211_scan_request *scan_req)
{
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;

	ieee80211_stop_queues(rtwdev->hw);
	rtw_leave_lps_deep(rtwdev);
	rtw_hci_flush_all_queues(rtwdev, false);
	rtw_mac_flush_all_queues(rtwdev, false);
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);

	rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rtwdev->hal.rcr &= ~BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
}

void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			  bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_vif *rtwvif;
	u8 chan = scan_info->op_chan;

	if (!vif)
		return;

	rtwdev->hal.rcr |= BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);

	rtw_core_scan_complete(rtwdev, vif, true);

	rtwvif = (struct rtw_vif *)vif->drv_priv;
	if (chan)
		rtw_store_op_chan(rtwdev, false);
	rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
	ieee80211_wake_queues(rtwdev->hw);
	ieee80211_scan_completed(rtwdev->hw, &info);

	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.scanning_vif = NULL;
}

static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
				 struct rtw_chan_list *list)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE);
	u8 *buf;
	int ret;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif);
	if (ret) {
		rtw_err(rtwdev, "Update probe request failed\n");
		goto out;
	}

	list->buf_size = size;
	list->size = 0;
	list->ch_num = 0;
	ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf);
out:
	kfree(buf);

	return ret;
}

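/* Start (@enable = true) or stop the firmware-offloaded scan. On start,
 * the probe requests and channel list are staged in the reserved page
 * first; when acting as an AP, the beacon is downloaded again afterwards
 * so beaconing keeps working with the rewritten reserved page.
 *
 * Illustrative call sequence (hypothetical caller):
 *
 *	rtw_hw_scan_start(rtwdev, vif, scan_req);
 *	ret = rtw_hw_scan_offload(rtwdev, vif, true);
 */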
int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			bool enable)
{
	struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_ch_switch_option cs_option = {0};
	struct rtw_chan_list chan_list = {0};
	int ret = 0;

	if (!rtwvif)
		return -EINVAL;

	cs_option.switch_en = enable;
	cs_option.back_op_en = scan_info->op_chan != 0;
	if (enable) {
		ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
		if (ret)
			goto out;
	}
	rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list);
out:
	if (rtwdev->ap_active) {
		ret = rtw_download_beacon(rtwdev);
		if (ret)
			rtw_err(rtwdev, "HW scan download beacon failed\n");
	}

	return ret;
}

void rtw_hw_scan_abort(struct rtw_dev *rtwdev)
{
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
		return;

	rtw_hw_scan_offload(rtwdev, vif, false);
	rtw_hw_scan_complete(rtwdev, vif, true);
}

void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw_c2h_cmd *c2h;
	bool aborted;
	u8 rc;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload);
	aborted = rc != RTW_SCAN_REPORT_SUCCESS;
	rtw_hw_scan_complete(rtwdev, vif, aborted);

	if (aborted)
		rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}

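/* Save (@backup = true) or restore the operating channel around an
 * off-channel period; restore re-programs it through
 * rtw_update_channel() from the saved values, treating channels above
 * 14 as 5 GHz.
 */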
void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 band;

	if (backup) {
		scan_info->op_chan = hal->current_channel;
		scan_info->op_bw = hal->current_band_width;
		scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
		scan_info->op_pri_ch = hal->primary_channel;
	} else {
		band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, scan_info->op_chan,
				   scan_info->op_pri_ch,
				   band, scan_info->op_bw);
	}
}

void rtw_clear_op_chan(struct rtw_dev *rtwdev)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;

	scan_info->op_chan = 0;
	scan_info->op_bw = 0;
	scan_info->op_pri_ch_idx = 0;
	scan_info->op_pri_ch = 0;
}

static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;

	return channel == scan_info->op_chan;
}

void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_c2h_cmd *c2h;
	enum rtw_scan_notify_id id;
	u8 chan, band, status;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
	id = GET_CHAN_SWITCH_ID(c2h->payload);
	status = GET_CHAN_SWITCH_STATUS(c2h->payload);

	if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
		band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, chan, chan, band,
				   RTW_CHANNEL_WIDTH_20);
		if (rtw_is_op_chan(rtwdev, chan)) {
			rtw_store_op_chan(rtwdev, false);
			ieee80211_wake_queues(rtwdev->hw);
			rtw_core_enable_beacon(rtwdev, true);
		}
	} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
		if (IS_CH_5G_BAND(chan)) {
			rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
		} else if (IS_CH_2G_BAND(chan)) {
			u8 chan_type;

			if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
				chan_type = COEX_SWITCH_TO_24G;
			else
				chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
			rtw_coex_switchband_notify(rtwdev, chan_type);
		}
		/* The channel in the C2H RTW_SCAN_NOTIFY_ID_PRESWITCH
		 * notification is the next channel that the hardware will
		 * switch to. Stop the queues if that channel is a non-op
		 * channel.
		 */
		if (!rtw_is_op_chan(rtwdev, chan) &&
		    rtw_is_op_chan(rtwdev, hal->current_channel)) {
			rtw_core_enable_beacon(rtwdev, false);
			ieee80211_stop_queues(rtwdev->hw);
		}
	}

	rtw_dbg(rtwdev, RTW_DBG_HW_SCAN,
		"Chan switch: %x, id: %x, status: %x\n", chan, id, status);
}