Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/rtw89/phy_be.c
104874 views
1
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2
/* Copyright(c) 2023 Realtek Corporation
3
*/
4
5
#include "debug.h"
6
#include "mac.h"
7
#include "phy.h"
8
#include "reg.h"
9
10
/* CCX (channel congestion/environment monitor) register layout for the
 * BE generation.  The common PHY environment-monitor code indexes this
 * table instead of hard-coding addresses, so it can serve multiple chip
 * generations; it covers the IFS_CLM counters/thresholds and the NHM
 * (noise histogram) configuration.
 */
static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK_V1,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT_V1,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA_V1,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA_V1,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS_V1,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L_V1,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H_V1,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L_V1,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H_V1,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT_V1,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
	.nhm = R_NHM_BE,
	.nhm_ready = B_NHM_READY_BE_MSK,
	.nhm_config = R_NHM_CFG,
	.nhm_period_mask = B_NHM_PERIOD_MSK,
	.nhm_unit_mask = B_NHM_COUNTER_MSK,
	.nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
	.nhm_en_mask = B_NHM_EN_MSK,
	.nhm_method = R_NHM_TH9,
	.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
76
77
/* PHY status report (PLCP histogram) register layout for the BE generation. */
static const struct rtw89_physts_regs rtw89_physts_regs_be = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
82
83
/* CFO (carrier frequency offset) compensation register layout for the
 * BE generation, used by the common digital CFO tracking code.
 */
static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
	.comp = R_DCFO_WEIGHT_V1,
	.weighting_mask = B_DCFO_WEIGHT_MSK_V1,
	.comp_seg0 = R_DCFO_OPT_V1,
	.valid_0_mask = B_DCFO_OPT_EN_V1,
};
89
90
/* Return the address offset that maps a PHY0 CR onto its PHY1 copy.
 *
 * The 256-byte register page (addr >> 8) decides whether a duplicated
 * PHY1 instance exists 0x1000 above the PHY0 address; pages outside the
 * listed ranges have a single shared instance (offset 0).
 */
static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
{
	/* Register pages that are duplicated for PHY1 at +0x1000. */
	static const struct {
		u32 first;
		u32 last;
	} dup_pages[] = {
		{ 0x4, 0xF },
		{ 0x20, 0x2B },
		{ 0x40, 0x4f },
		{ 0x60, 0x6f },
		{ 0xE4, 0xE5 },
		{ 0xE8, 0xED },
	};
	u32 page = addr >> 8;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(dup_pages); i++) {
		if (page >= dup_pages[i].first && page <= dup_pages[i].last)
			return 0x1000;
	}

	return 0x0;
}
107
108
/* Decoded view of a BB gain table entry "address".  The entries are not
 * real CRs: the 32-bit address is an encoded descriptor whose bytes
 * select the config type, gain band, path/bandwidth and sub-type.
 * NOTE(review): the struct view assumes the usual little-endian byte
 * layout of the encoded word - confirm against the table generator.
 */
union rtw89_phy_bb_gain_arg_be {
	u32 addr;
	struct {
		u8 type;
#define BB_GAIN_TYPE_SUB0_BE GENMASK(3, 0)
#define BB_GAIN_TYPE_SUB1_BE GENMASK(7, 4)
		u8 path_bw;
#define BB_GAIN_PATH_BE GENMASK(3, 0)
#define BB_GAIN_BW_BE GENMASK(7, 4)
		u8 gain_band;
		u8 cfg_type;
	} __packed;
} __packed;
121
122
/* Unpack one "gain error" table word into the per-chip BB gain cache.
 *
 * Each 32-bit data word carries up to four packed s8 values (one per
 * byte, LSB first).  @arg.type selects the destination:
 *   0: LNA gain indexes 0..3
 *   1: LNA gain indexes 4..6 (only three bytes used)
 *   2: TIA gain indexes 0..1
 * Unknown types are logged and ignored.
 */
static void
rtw89_phy_cfg_bb_gain_error_be(struct rtw89_dev *rtwdev,
			       union rtw89_phy_bb_gain_arg_be arg, u32 data)
{
	struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
	u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
	u8 gband = arg.gain_band;
	u8 type = arg.type;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][bw_type][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}
153
154
/* Unpack one "RPL offset" table word into the per-chip BB gain cache.
 *
 * type_sub1 names the bandwidth the offsets apply to.  For 20M a single
 * value is stored; for 40M/80M one byte per 20 MHz subchannel.  For 160M
 * the word only carries half of the subchannels, and type_sub0 selects
 * which half (0: lower RTW89_BW20_SC_80M entries, else: upper half).
 */
static void
rtw89_phy_cfg_bb_rpl_ofst_be(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg_be arg, u32 data)
{
	struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	u8 type_sub0 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB0_BE);
	u8 type_sub1 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB1_BE);
	u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
	u8 gband = arg.gain_band;
	u8 ofst = 0;
	int i;

	switch (type_sub1) {
	case RTW89_CMAC_BW_20M:
		gain->rpl_ofst_20[gband][path][0] = (s8)data;
		break;
	case RTW89_CMAC_BW_40M:
		for (i = 0; i < RTW89_BW20_SC_40M; i++, data >>= 8)
			gain->rpl_ofst_40[gband][path][i] = data & 0xff;
		break;
	case RTW89_CMAC_BW_80M:
		for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
			gain->rpl_ofst_80[gband][path][i] = data & 0xff;
		break;
	case RTW89_CMAC_BW_160M:
		/* 160M offsets arrive in two words; place this word's
		 * bytes in the lower or upper half of the array.
		 */
		if (type_sub0 == 0)
			ofst = 0;
		else
			ofst = RTW89_BW20_SC_80M;

		for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
			gain->rpl_ofst_160[gband][path][i + ofst] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown type_sub1: %d\n",
			   arg.addr, data, type_sub1);
		break;
	}
}
194
195
/* Unpack one "OP1dB" (1 dB compression point) table word into the
 * per-chip BB gain cache.
 *
 * @arg.type selects the destination slice:
 *   0: LNA op1db indexes 0..3      1: LNA op1db indexes 4..6
 *   2: TIA+LNA op1db indexes 0..3  3: TIA+LNA op1db indexes 4..7
 * Bytes are consumed LSB first, one per index.
 */
static void
rtw89_phy_cfg_bb_gain_op1db_be(struct rtw89_dev *rtwdev,
			       union rtw89_phy_bb_gain_arg_be arg, u32 data)
{
	struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
	u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
	u8 gband = arg.gain_band;
	u8 type = arg.type;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
		break;
	case 1:
		/* note: only three bytes of the word are consumed here */
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}
230
231
/* Table-load callback: dispatch one BB gain table entry.
 *
 * Validates the decoded bandwidth/band/path fields against chip limits,
 * then routes the entry by cfg_type to the matching unpack helper.
 * cfg_type 15 is a raw CR write (high halfword) instead of a cache fill;
 * cfg_type 4 is only meaningful for eFEM designs (rfe_type >= 50) and is
 * silently skipped otherwise.
 */
static void rtw89_phy_config_bb_gain_be(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg_be arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
	u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);

	if (bw_type >= RTW89_BB_BW_NR_BE)
		return;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR_BE)
		return;

	if (path >= chip->rf_path_num)
		return;

	/* flow-control pseudo addresses are not expected in this table */
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error_be(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst_be(rtwdev, arg, reg->data);
		break;
	case 2:
		/* ignore BB gain bypass */
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db_be(rtwdev, arg, reg->data);
		break;
	case 15:
		/* direct CR write: lower 20 bits are the real address */
		rtw89_phy_write32_idx(rtwdev, reg->addr & 0xFFFFF, MASKHWORD,
				      reg->data, RTW89_PHY_0);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
285
286
/* Pre-initialize the RF NCTL (IQK/DPK calibration engine) blocks:
 * enable go-TX paths, clock gating and clocks, then release the
 * IQK/DPK resets.  The C1 (second PHY) resets are only pulled when
 * DBCC is enabled.
 */
static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0, B_GOTX_IQKDPK, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1, B_GOTX_IQKDPK, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQKDPK_HC, B_IQKDPK_HC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CLK_GCK, B_CLK_GCK, 0x00fffff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST, B_IQK_DPK_PRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1, B_IQK_DPK_PRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TXRFC, B_TXRFC_RST, 0x1);

	if (rtwdev->dbcc_en) {
		rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_C1, B_IQK_DPK_RST, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_TXRFC_C1, B_TXRFC_RST, 0x1);
	}
}
303
304
static
305
void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
306
{
307
u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
308
309
base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
310
311
for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
312
cr = base_macid_lmt + macid_idx;
313
rtw89_write32(rtwdev, cr, 0x03007F7F);
314
}
315
}
316
317
static
318
void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
319
{
320
int i, max_macid = 32;
321
u32 cr = R_BE_PWR_MACID_PATH_BASE;
322
323
for (i = 0; i < max_macid; i++, cr += 4)
324
rtw89_write32(rtwdev, cr, 0x03C86000);
325
}
326
327
/* Zero the whole TX power unit (TPU) state for one MAC: the by-rate,
 * RU-limit and rate-offset tables, plus the per-category power offset
 * fields, leaving the TPU in a neutral (no extra offset) state.
 */
static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx)
{
	u32 addr;

	for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
		rtw89_write32(rtwdev, addr, 0);
	for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
		rtw89_write32(rtwdev, addr, 0);
	for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
		rtw89_write32(rtwdev, addr, 0);

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_LMTBF, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMTBF_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_BYRATE_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_RULMT_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_SW, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_SW_DB, 0);
}
350
351
/* Set the listen path of MAC_1 to path B (0x2).  Skipped when CMAC_1
 * is not enabled (single-MAC operation).
 */
static
void rtw89_phy_bb_wrap_listen_path_en_init(struct rtw89_dev *rtwdev)
{
	u32 addr;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
	if (ret)
		return;

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_LISTEN_PATH, RTW89_MAC_1);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_LISTEN_PATH_EN, 0x2);
}
364
365
/* Clear every "force" override of the BB wrapper power control for one
 * MAC (limit, rate, RU, per-MACID, coex, by-rate), so the normal
 * TX power pipeline is in effect.
 */
static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
					    enum rtw89_mac_idx mac_idx)
{
	u32 addr;

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_LMT, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_LMT_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RATE_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ALL, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_FORCE_PWR_BY_RATE_EN, 0);
}
384
385
/* Program the FTM (fine timing measurement) power CRs for one MAC with
 * their default values.
 */
static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
				       enum rtw89_mac_idx mac_idx)
{
	u32 addr;

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM, mac_idx);
	rtw89_write32(rtwdev, addr, 0xE4E431);

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
	rtw89_write32_mask(rtwdev, addr, 0x7, 0);
}
396
397
/* Program the uplink TX power control (RSSI target / thresholds) for
 * all MACs.  The values are RTL8922A-specific, so other chip IDs are
 * skipped entirely.
 */
static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u8 mac_idx;
	u32 addr;

	if (chip_id != RTL8922A)
		return;

	for (mac_idx = 0; mac_idx < RTW89_MAC_NUM; mac_idx++) {
		addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RSSI_TARGET_LMT, mac_idx);
		rtw89_write32(rtwdev, addr, 0x0201FE00);
		addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_TH, mac_idx);
		rtw89_write32(rtwdev, addr, 0x00FFEC7E);
	}
}
413
414
/* Full BB wrapper bring-up sequence for one MAC: per-MACID defaults,
 * listen path, force-CR clear, FTM defaults, TPU reset, and UL power.
 */
static void __rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev,
					enum rtw89_mac_idx mac_idx)
{
	rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
	rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
	rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
	rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
	rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
	rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
	rtw89_phy_bb_wrap_ul_pwr(rtwdev);
}
425
426
/* Chip-ops entry point: initialize the BB wrapper for MAC_0, and also
 * for MAC_1 when DBCC (dual-band dual-concurrent) is enabled.
 */
static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
{
	__rtw89_phy_bb_wrap_init_be(rtwdev, RTW89_MAC_0);
	if (rtwdev->dbcc_en)
		__rtw89_phy_bb_wrap_init_be(rtwdev, RTW89_MAC_1);
}
432
433
/* Configure the channel-info (CSI/CH raw data) capture block: segment
 * selection, data bitmap, element bitmap/source, and report type/scale.
 */
static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG_LEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_CHINFO_DATA, B_CHINFO_DATA_BITMAP, 0x1);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_ELM_BITMAP, 0x40303);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_SRC, 0x0);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_TYPE, 0x3);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
}
443
444
/* One segment of the by-rate TX power table layout.
 * @init:         first rate descriptor of the segment (rs/idx/ofdma)
 * @num_of_idx:   number of consecutive rate indexes in the segment
 * @no_over_bw40: segment is absent for bandwidths above 40 MHz
 * @no_multi_nss: segment exists only for 1SS
 */
struct rtw89_byr_spec_ent_be {
	struct rtw89_rate_desc init;
	u8 num_of_idx;
	bool no_over_bw40;
	bool no_multi_nss;
};
450
451
/* Hardware layout of the by-rate TX power table for the BE generation,
 * in register order: CCK, OFDM, the two extra MCS14/15 entries
 * (non-OFDMA and OFDMA), then the full MCS and HE-DCM ranges for both
 * OFDMA modes.
 */
static const struct rtw89_byr_spec_ent_be rtw89_byr_spec_be[] = {
	{
		.init = { .rs = RTW89_RS_CCK },
		.num_of_idx = RTW89_RATE_CCK_NUM,
		.no_over_bw40 = true,
		.no_multi_nss = true,
	},
	{
		.init = { .rs = RTW89_RS_OFDM },
		.num_of_idx = RTW89_RATE_OFDM_NUM,
		.no_multi_nss = true,
	},
	{
		/* MCS 14/15 entries, non-OFDMA */
		.init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_NON_OFDMA },
		.num_of_idx = 2,
		.no_multi_nss = true,
	},
	{
		/* MCS 14/15 entries, OFDMA */
		.init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_OFDMA },
		.num_of_idx = 2,
		.no_multi_nss = true,
	},
	{
		.init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_NON_OFDMA },
		.num_of_idx = 14,
	},
	{
		.init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_NON_OFDMA },
		.num_of_idx = RTW89_RATE_HEDCM_NUM,
	},
	{
		.init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_OFDMA },
		.num_of_idx = 14,
	},
	{
		.init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_OFDMA },
		.num_of_idx = RTW89_RATE_HEDCM_NUM,
	},
};
490
491
/* Write the by-rate TX power values for one (band, bw, nss) combination.
 *
 * Walks rtw89_byr_spec_be in register order, reads each rate's power,
 * and packs four consecutive s8 values per 32-bit register write.
 * @addr is advanced in place so successive calls fill consecutive CRs.
 * NOTE(review): values are only flushed in groups of four; this relies
 * on the applicable entry count being a multiple of 4 per combination.
 */
static
void __phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev, u8 band, u8 bw,
			       u8 nss, u32 *addr, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_byr_spec_ent_be *ent;
	struct rtw89_rate_desc desc;
	int pos = 0;
	int i, j;
	u32 val;
	s8 v[4];

	for (i = 0; i < ARRAY_SIZE(rtw89_byr_spec_be); i++) {
		ent = &rtw89_byr_spec_be[i];

		/* skip segments that don't exist for this bw/nss */
		if (bw > RTW89_CHANNEL_WIDTH_40 && ent->no_over_bw40)
			continue;
		if (nss > RTW89_NSS_1 && ent->no_multi_nss)
			continue;

		desc = ent->init;
		desc.nss = nss;
		for (j = 0; j < ent->num_of_idx; j++, desc.idx++) {
			v[pos] = rtw89_phy_read_txpwr_byrate(rtwdev, band, bw,
							     &desc);
			pos = (pos + 1) % 4;
			/* flush once four byte-values are accumulated */
			if (pos)
				continue;

			val = u32_encode_bits(v[0], GENMASK(7, 0)) |
			      u32_encode_bits(v[1], GENMASK(15, 8)) |
			      u32_encode_bits(v[2], GENMASK(23, 16)) |
			      u32_encode_bits(v[3], GENMASK(31, 24));

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, *addr, val);
			*addr += 4;
		}
	}
}
529
530
/* Chip-ops entry point: fill the whole by-rate TX power table for the
 * current channel's band, covering every bandwidth up to 320 MHz and
 * NSS up to 2.  The single running @addr spans all combinations.
 */
static void rtw89_phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u32 addr = R_BE_PWR_BY_RATE;
	u8 band = chan->band_type;
	u8 bw, nss;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate on band %d\n", band);

	for (bw = 0; bw <= RTW89_CHANNEL_WIDTH_320; bw++)
		for (nss = 0; nss <= RTW89_NSS_2; nss++)
			__phy_set_txpwr_byrate_be(rtwdev, band, bw, nss,
						  &addr, phy_idx);
}
546
547
/* Chip-ops entry point: write the per-rate-section TX power offsets
 * (CCK/OFDM/HT/VHT/HE/EHT plus DL-RU HE/EHT), packed 4 bits each into
 * a single 32-bit CR.
 */
static void rtw89_phy_set_txpwr_offset_be(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM_BE] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr offset on band %d\n", band);

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_BE; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);

	val = u32_encode_bits(v[RTW89_RATE_OFFSET_CCK], GENMASK(3, 0)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_OFDM], GENMASK(7, 4)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_HT], GENMASK(11, 8)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_VHT], GENMASK(15, 12)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_HE], GENMASK(19, 16)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_EHT], GENMASK(23, 20)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_HE], GENMASK(27, 24)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_EHT], GENMASK(31, 28));

	rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_BE_PWR_RATE_OFST_CTRL, val);
}
576
577
/* Read the TX power limit for one (band, bw, ntx, rs, ch) tuple for
 * both the non-beamforming and beamforming cases, storing one value
 * per RTW89_BF_NUM slot in *ptr.
 */
static void
fill_limit_nonbf_bf(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM],
		    u8 band, u8 bw, u8 ntx, u8 rs, u8 ch)
{
	int bf;

	for (bf = 0; bf < RTW89_BF_NUM; bf++)
		(*ptr)[bf] = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, ntx,
							rs, bf, ch);
}
587
588
static void
589
fill_limit_nonbf_bf_min(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM],
590
u8 band, u8 bw, u8 ntx, u8 rs, u8 ch1, u8 ch2)
591
{
592
s8 v1[RTW89_BF_NUM];
593
s8 v2[RTW89_BF_NUM];
594
int bf;
595
596
fill_limit_nonbf_bf(rtwdev, &v1, band, bw, ntx, rs, ch1);
597
fill_limit_nonbf_bf(rtwdev, &v2, band, bw, ntx, rs, ch2);
598
599
for (bf = 0; bf < RTW89_BF_NUM; bf++)
600
(*ptr)[bf] = min(v1[bf], v2[bf]);
601
}
602
603
/* Fill the TX power limit page for a 20 MHz channel: CCK (20M and 40M
 * masks), OFDM and the single MCS 20M entry, all at the center channel.
 */
static void phy_fill_limit_20m_be(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_be *lmt,
				  u8 band, u8 ntx, u8 ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch);
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch);
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, ch);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch);
}
616
617
/* Fill the TX power limit page for a 40 MHz channel.  CCK 20M uses the
 * lower subchannel (ch - 2); OFDM uses the primary channel; the two
 * MCS 20M entries cover the lower/upper subchannels.
 */
static void phy_fill_limit_40m_be(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_be *lmt,
				  u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch);
}
636
637
/* Fill the TX power limit page for an 80 MHz channel: four 20M
 * subchannels (ch +/- 2, 6), two 40M halves (ch +/- 4), the 80M entry,
 * and the 0.5-offset 40M entry which takes the stricter of both halves.
 */
static void phy_fill_limit_80m_be(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_be *lmt,
				  u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch);

	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 4, ch + 4);
}
663
664
/* Fill the TX power limit page for a 160 MHz channel: eight 20M
 * subchannels, four 40M, two 80M, the 160M entry, plus the 0.5/2.5
 * offset 40M entries (minimum over their straddled halves).
 */
static void phy_fill_limit_160m_be(struct rtw89_dev *rtwdev,
				   struct rtw89_txpwr_limit_be *lmt,
				   u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band,
			    RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch);

	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 12, ch - 4);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch + 4, ch + 12);
}
712
713
/* Fill the TX power limit page for a 320 MHz channel: sixteen 20M
 * subchannels, eight 40M, four 80M, two 160M, the 320M entry, plus the
 * four offset 40M entries (0.5/2.5/4.5/6.5), each the minimum over the
 * two 40M halves it straddles.
 */
static void phy_fill_limit_320m_be(struct rtw89_dev *rtwdev,
				   struct rtw89_txpwr_limit_be *lmt,
				   u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 30);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 26);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 22);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 18);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[8], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[9], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[10], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[11], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[12], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 18);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[13], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 22);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[14], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 26);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[15], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 30);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 28);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 20);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[4], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[5], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[6], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 20);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[7], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 28);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 24);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[2], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[3], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 24);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band,
			    RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch - 16);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[1], band,
			    RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch + 16);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_320m, band,
			    RTW89_CHANNEL_WIDTH_320, ntx, RTW89_RS_MCS, ch);

	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 28, ch - 20);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 12, ch - 4);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_4p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch + 4, ch + 12);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_6p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch + 20, ch + 28);
}
800
801
/* Zero-initialize and populate the TX power limit page for the given
 * channel, dispatching on its bandwidth.  Unknown bandwidths simply
 * leave the page at all-zero.
 */
static void rtw89_phy_fill_limit_be(struct rtw89_dev *rtwdev,
				    const struct rtw89_chan *chan,
				    struct rtw89_txpwr_limit_be *lmt,
				    u8 ntx)
{
	u8 band = chan->band_type;
	u8 pri_ch = chan->primary_channel;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt, 0, sizeof(*lmt));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		phy_fill_limit_20m_be(rtwdev, lmt, band, ntx, ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		phy_fill_limit_40m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		phy_fill_limit_80m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		phy_fill_limit_160m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_320:
		phy_fill_limit_320m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	}
}
831
832
/* Chip-ops entry point: compute and write the TX power limit pages for
 * NSS 1..2.  Each page is streamed to the hardware as 32-bit words,
 * four packed s8 limit values per write; the struct layout must match
 * the register page exactly (BUILD_BUG_ON guards the size).
 */
static void rtw89_phy_set_txpwr_limit_be(struct rtw89_dev *rtwdev,
					 const struct rtw89_chan *chan,
					 enum rtw89_phy_idx phy_idx)
{
	struct rtw89_txpwr_limit_be lmt;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_be) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE_BE);

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit on band %d bw %d\n",
		    chan->band_type, chan->band_width);

	addr = R_BE_PWR_LMT;
	for (i = 0; i <= RTW89_NSS_2; i++) {
		rtw89_phy_fill_limit_be(rtwdev, chan, &lmt, i);

		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_BE;
		     j += 4, addr += 4, ptr += 4) {
			val = u32_encode_bits(ptr[0], GENMASK(7, 0)) |
			      u32_encode_bits(ptr[1], GENMASK(15, 8)) |
			      u32_encode_bits(ptr[2], GENMASK(23, 16)) |
			      u32_encode_bits(ptr[3], GENMASK(31, 24));

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
864
865
/* Fill one subchannel slot (@index) of the RU TX power limit page with
 * the limits for every RU size (26/52/106 tones and the aggregated
 * 52+26 / 106+26 cases) at channel @ch.
 */
static void fill_limit_ru_each(struct rtw89_dev *rtwdev, u8 index,
			       struct rtw89_txpwr_limit_ru_be *lmt_ru,
			       u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, ntx, ch);
	lmt_ru->ru52[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52, ntx, ch);
	lmt_ru->ru106[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106, ntx, ch);
	lmt_ru->ru52_26[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52_26, ntx, ch);
	lmt_ru->ru106_26[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106_26, ntx, ch);
}
880
881
/* RU limit page for a 20 MHz channel: a single subchannel slot at the
 * center channel.
 */
static void phy_fill_limit_ru_20m_be(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_be *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch);
}
887
888
/* RU limit page for a 40 MHz channel: one slot per 20 MHz subchannel
 * (ch - 2, ch + 2).
 */
static void phy_fill_limit_ru_40m_be(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_be *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 2);
	fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch + 2);
}
895
896
/* 80 MHz bandwidth: four 20 MHz subchannels at ch - 6, -2, +2, +6. */
static void phy_fill_limit_ru_80m_be(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_be *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	u8 i;

	for (i = 0; i < 4; i++)
		fill_limit_ru_each(rtwdev, i, lmt_ru, band, ntx,
				   ch - 6 + 4 * i);
}
/* 160 MHz bandwidth: eight 20 MHz subchannels from ch - 14 to ch + 14,
 * 4 channel units apart.
 */
static void phy_fill_limit_ru_160m_be(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_be *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	u8 i;

	for (i = 0; i < 8; i++)
		fill_limit_ru_each(rtwdev, i, lmt_ru, band, ntx,
				   ch - 14 + 4 * i);
}
/* 320 MHz bandwidth: sixteen 20 MHz subchannels from ch - 30 to ch + 30,
 * 4 channel units apart.
 */
static void phy_fill_limit_ru_320m_be(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_be *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	u8 i;

	for (i = 0; i < 16; i++)
		fill_limit_ru_each(rtwdev, i, lmt_ru, band, ntx,
				   ch - 30 + 4 * i);
}
static void rtw89_phy_fill_limit_ru_be(struct rtw89_dev *rtwdev,
943
const struct rtw89_chan *chan,
944
struct rtw89_txpwr_limit_ru_be *lmt_ru,
945
u8 ntx)
946
{
947
u8 band = chan->band_type;
948
u8 ch = chan->channel;
949
u8 bw = chan->band_width;
950
951
memset(lmt_ru, 0, sizeof(*lmt_ru));
952
953
switch (bw) {
954
case RTW89_CHANNEL_WIDTH_20:
955
phy_fill_limit_ru_20m_be(rtwdev, lmt_ru, band, ntx, ch);
956
break;
957
case RTW89_CHANNEL_WIDTH_40:
958
phy_fill_limit_ru_40m_be(rtwdev, lmt_ru, band, ntx, ch);
959
break;
960
case RTW89_CHANNEL_WIDTH_80:
961
phy_fill_limit_ru_80m_be(rtwdev, lmt_ru, band, ntx, ch);
962
break;
963
case RTW89_CHANNEL_WIDTH_160:
964
phy_fill_limit_ru_160m_be(rtwdev, lmt_ru, band, ntx, ch);
965
break;
966
case RTW89_CHANNEL_WIDTH_320:
967
phy_fill_limit_ru_320m_be(rtwdev, lmt_ru, band, ntx, ch);
968
break;
969
}
970
}
static void rtw89_phy_set_txpwr_limit_ru_be(struct rtw89_dev *rtwdev,
973
const struct rtw89_chan *chan,
974
enum rtw89_phy_idx phy_idx)
975
{
976
struct rtw89_txpwr_limit_ru_be lmt_ru;
977
const s8 *ptr;
978
u32 addr, val;
979
u8 i, j;
980
981
BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_be) !=
982
RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE);
983
984
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
985
"[TXPWR] set txpwr limit ru on band %d bw %d\n",
986
chan->band_type, chan->band_width);
987
988
addr = R_BE_PWR_RU_LMT;
989
for (i = 0; i <= RTW89_NSS_2; i++) {
990
rtw89_phy_fill_limit_ru_be(rtwdev, chan, &lmt_ru, i);
991
992
ptr = (s8 *)&lmt_ru;
993
for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE;
994
j += 4, addr += 4, ptr += 4) {
995
val = u32_encode_bits(ptr[0], GENMASK(7, 0)) |
996
u32_encode_bits(ptr[1], GENMASK(15, 8)) |
997
u32_encode_bits(ptr[2], GENMASK(23, 16)) |
998
u32_encode_bits(ptr[3], GENMASK(31, 24));
999
1000
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
1001
}
1002
}
1003
}
/* PHY generation hook table for BE-generation chips, consumed by the
 * common rtw89 PHY layer.  Bundles the BE register maps (CCX, physts,
 * CFO) with the BE implementations of the init and TX power setters.
 * cr_base is the base offset applied to PHY control-register accesses
 * for this generation.
 */
const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
	.cr_base = 0x20000,
	.ccx = &rtw89_ccx_regs_be,
	.physts = &rtw89_physts_regs_be,
	.cfo = &rtw89_cfo_regs_be,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
	.config_bb_gain = rtw89_phy_config_bb_gain_be,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
	.bb_wrap_init = rtw89_phy_bb_wrap_init_be,
	.ch_info_init = rtw89_phy_ch_info_init_be,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_be,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_be,
};
EXPORT_SYMBOL(rtw89_phy_gen_be);