Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/rtw89/phy_be.c
48253 views
1
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2
/* Copyright(c) 2023 Realtek Corporation
3
*/
4
5
#include "debug.h"
6
#include "mac.h"
7
#include "phy.h"
8
#include "reg.h"
9
10
/* CCX / IFS-CLM (environment monitor) register layout for BE-generation
 * chips. The common rtw89 PHY code drives the counters through these
 * addr/mask pairs instead of hard-coded registers.
 */
static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK_V1,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT_V1,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA_V1,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA_V1,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS_V1,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L_V1,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H_V1,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L_V1,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H_V1,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT_V1,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};
67
68
/* PHY status (PLCP histogram) control registers for BE chips. */
static const struct rtw89_physts_regs rtw89_physts_regs_be = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
73
74
/* Digital CFO (carrier frequency offset) compensation registers for
 * BE chips.
 */
static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
	.comp = R_DCFO_WEIGHT_V1,
	.weighting_mask = B_DCFO_WEIGHT_MSK_V1,
	.comp_seg0 = R_DCFO_OPT_V1,
	.valid_0_mask = B_DCFO_OPT_EN_V1,
};
80
81
/* Return the CR offset that maps a PHY0 BB register address onto its
 * PHY1 counterpart. Only the register pages listed below are mirrored
 * at +0x1000; any other address has no PHY1 copy and gets offset 0.
 */
static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 page = addr >> 8;

	if ((page >= 0x4 && page <= 0xF) ||
	    (page >= 0x20 && page <= 0x2B) ||
	    (page >= 0x40 && page <= 0x4f) ||
	    (page >= 0x60 && page <= 0x6f) ||
	    (page >= 0xE4 && page <= 0xE5) ||
	    (page >= 0xE8 && page <= 0xED))
		return 0x1000;

	return 0x0;
}
98
99
/* Decodes a BB gain table "address" word into its packed byte fields.
 * NOTE(review): the overlay assumes the struct's byte order matches the
 * encoding of the table words — confirm against the table generator.
 */
union rtw89_phy_bb_gain_arg_be {
	u32 addr;
	struct {
		u8 type;	/* table selector; sub-fields below */
#define BB_GAIN_TYPE_SUB0_BE GENMASK(3, 0)
#define BB_GAIN_TYPE_SUB1_BE GENMASK(7, 4)
		u8 path_bw;	/* RF path (low nibble) + bandwidth (high nibble) */
#define BB_GAIN_PATH_BE GENMASK(3, 0)
#define BB_GAIN_BW_BE GENMASK(7, 4)
		u8 gain_band;	/* gain band index */
		u8 cfg_type;	/* category, dispatched in rtw89_phy_config_bb_gain_be() */
	} __packed;
} __packed;
112
113
static void
114
rtw89_phy_cfg_bb_gain_error_be(struct rtw89_dev *rtwdev,
115
union rtw89_phy_bb_gain_arg_be arg, u32 data)
116
{
117
struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
118
u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
119
u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
120
u8 gband = arg.gain_band;
121
u8 type = arg.type;
122
int i;
123
124
switch (type) {
125
case 0:
126
for (i = 0; i < 4; i++, data >>= 8)
127
gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
128
break;
129
case 1:
130
for (i = 4; i < 7; i++, data >>= 8)
131
gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
132
break;
133
case 2:
134
for (i = 0; i < 2; i++, data >>= 8)
135
gain->tia_gain[gband][bw_type][path][i] = data & 0xff;
136
break;
137
default:
138
rtw89_warn(rtwdev,
139
"bb gain error {0x%x:0x%x} with unknown type: %d\n",
140
arg.addr, data, type);
141
break;
142
}
143
}
144
145
/* Parse one "RPL offset" word from the BB gain table into the per-
 * bandwidth rpl_ofst_* tables. type_sub1 selects the bandwidth;
 * type_sub0 selects which half of the 160MHz table to fill.
 */
static void
rtw89_phy_cfg_bb_rpl_ofst_be(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg_be arg, u32 data)
{
	struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	u8 type_sub0 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB0_BE);
	u8 type_sub1 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB1_BE);
	u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
	u8 gband = arg.gain_band;
	u8 ofst = 0;
	int i;

	switch (type_sub1) {
	case RTW89_CMAC_BW_20M:
		/* single entry: whole word interpreted as signed */
		gain->rpl_ofst_20[gband][path][0] = (s8)data;
		break;
	case RTW89_CMAC_BW_40M:
		/* one byte per 20MHz sub-channel, LSB first */
		for (i = 0; i < RTW89_BW20_SC_40M; i++, data >>= 8)
			gain->rpl_ofst_40[gband][path][i] = data & 0xff;
		break;
	case RTW89_CMAC_BW_80M:
		for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
			gain->rpl_ofst_80[gband][path][i] = data & 0xff;
		break;
	case RTW89_CMAC_BW_160M:
		/* 160MHz table arrives in two words of RTW89_BW20_SC_80M
		 * entries each; type_sub0 picks the half being filled.
		 */
		if (type_sub0 == 0)
			ofst = 0;
		else
			ofst = RTW89_BW20_SC_80M;

		for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
			gain->rpl_ofst_160[gband][path][i + ofst] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown type_sub1: %d\n",
			   arg.addr, data, type_sub1);
		break;
	}
}
185
186
static void
187
rtw89_phy_cfg_bb_gain_op1db_be(struct rtw89_dev *rtwdev,
188
union rtw89_phy_bb_gain_arg_be arg, u32 data)
189
{
190
struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
191
u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
192
u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
193
u8 gband = arg.gain_band;
194
u8 type = arg.type;
195
int i;
196
197
switch (type) {
198
case 0:
199
for (i = 0; i < 4; i++, data >>= 8)
200
gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
201
break;
202
case 1:
203
for (i = 4; i < 7; i++, data >>= 8)
204
gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
205
break;
206
case 2:
207
for (i = 0; i < 4; i++, data >>= 8)
208
gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
209
break;
210
case 3:
211
for (i = 4; i < 8; i++, data >>= 8)
212
gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
213
break;
214
default:
215
rtw89_warn(rtwdev,
216
"bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
217
arg.addr, data, type);
218
break;
219
}
220
}
221
222
/* Dispatch one BB gain table entry to its per-category parser.
 * Entries whose bandwidth, gain band, or RF path fall outside what this
 * chip supports are silently dropped.
 */
static void rtw89_phy_config_bb_gain_be(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg_be arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
	u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);

	if (bw_type >= RTW89_BB_BW_NR_BE)
		return;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR_BE)
		return;

	if (path >= chip->rf_path_num)
		return;

	/* 0xf9..0xfe are flow-control pseudo-addresses, not table data */
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error_be(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst_be(rtwdev, arg, reg->data);
		break;
	case 2:
		/* ignore BB gain bypass */
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db_be(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		/* unsupported here: warn like any unknown cfg_type */
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
272
273
/* One-time RF NCTL (IQK/DPK engine) pre-initialization for BE chips.
 * Pure register writes; the values are vendor-provided init settings.
 */
static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0, B_GOTX_IQKDPK, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1, B_GOTX_IQKDPK, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQKDPK_HC, B_IQKDPK_HC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_CLK_GCK, B_CLK_GCK, 0x00fffff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST, B_IQK_DPK_PRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1, B_IQK_DPK_PRST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TXRFC, B_TXRFC_RST, 0x1);

	/* the second copy (_C1 regs) only matters when DBCC is active */
	if (rtwdev->dbcc_en) {
		rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_C1, B_IQK_DPK_RST, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_TXRFC_C1, B_TXRFC_RST, 0x1);
	}
}
290
291
static
292
void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
293
{
294
u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
295
296
base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
297
298
for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
299
cr = base_macid_lmt + macid_idx;
300
rtw89_write32(rtwdev, cr, 0x03007F7F);
301
}
302
}
303
304
static
305
void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
306
{
307
int i, max_macid = 32;
308
u32 cr = R_BE_PWR_MACID_PATH_BASE;
309
310
for (i = 0; i < max_macid; i++, cr += 4)
311
rtw89_write32(rtwdev, cr, 0x03C86000);
312
}
313
314
/* Zero all TX power unit (TPU) tables for the given MAC: the by-rate
 * table, RU limit table, rate offset table, and the per-category power
 * offset fields.
 */
static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx)
{
	u32 addr;

	for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
		rtw89_write32(rtwdev, addr, 0);
	for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
		rtw89_write32(rtwdev, addr, 0);
	for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
		rtw89_write32(rtwdev, addr, 0);

	/* clear the individual offset fields, one CR each */
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_LMTBF, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMTBF_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_BYRATE_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_RULMT_DB, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_SW, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_SW_DB, 0);
}
337
338
/* Set the listen-path enable field on MAC_1. Skipped entirely when
 * CMAC_1 is not powered (rtw89_mac_check_mac_en() fails).
 */
static
void rtw89_phy_bb_wrap_listen_path_en_init(struct rtw89_dev *rtwdev)
{
	u32 addr;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
	if (ret)
		return;

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_LISTEN_PATH, RTW89_MAC_1);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_LISTEN_PATH_EN, 0x2);
}
351
352
/* Clear every "force" override so TX power control follows the normal
 * tables: limit, rate boost, RU limit, per-MACID, coex, and by-rate
 * forcing are all disabled.
 */
static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
					    enum rtw89_mac_idx mac_idx)
{
	u32 addr;

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_LMT, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_LMT_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RATE_ON, 0);
	/* R_BE_PWR_OFST_RULMT carries two separate force bits */
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ALL, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
	rtw89_write32_mask(rtwdev, addr, B_BE_FORCE_PWR_BY_RATE_EN, 0);
}
371
372
/* Initialize FTM-related TX power CRs.
 * NOTE(review): 0xE4E431 and the raw 0x7 field mask are vendor-provided
 * values; individual bit meanings are not documented here.
 */
static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
				       enum rtw89_mac_idx mac_idx)
{
	u32 addr;

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM, mac_idx);
	rtw89_write32(rtwdev, addr, 0xE4E431);

	addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
	rtw89_write32_mask(rtwdev, addr, 0x7, 0);
}
383
384
/* Program UL power control RSSI target/threshold CRs on every MAC.
 * Only applicable to RTL8922A; a no-op on other chips.
 */
static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u8 mac_idx;
	u32 addr;

	if (chip_id != RTL8922A)
		return;

	for (mac_idx = 0; mac_idx < RTW89_MAC_NUM; mac_idx++) {
		addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RSSI_TARGET_LMT, mac_idx);
		rtw89_write32(rtwdev, addr, 0x0201FE00);
		addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_TH, mac_idx);
		rtw89_write32(rtwdev, addr, 0x00FFEC7E);
	}
}
400
401
/* Run the complete BB wrapper init sequence for one MAC. */
static void __rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev,
					enum rtw89_mac_idx mac_idx)
{
	rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
	rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
	rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
	rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
	rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
	rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
	rtw89_phy_bb_wrap_ul_pwr(rtwdev);
}
412
413
/* BB wrapper init entry point: MAC_0 always, MAC_1 only under DBCC. */
static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
{
	__rtw89_phy_bb_wrap_init_be(rtwdev, RTW89_MAC_0);
	if (rtwdev->dbcc_en)
		__rtw89_phy_bb_wrap_init_be(rtwdev, RTW89_MAC_1);
}
419
420
/* Configure channel-info capture defaults: segment length/bitmap, data
 * bitmap, element source, report type, and scaling.
 */
static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG_LEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG, 0xf);
	rtw89_phy_write32_mask(rtwdev, R_CHINFO_DATA, B_CHINFO_DATA_BITMAP, 0x1);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_ELM_BITMAP, 0x40303);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_SRC, 0x0);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_TYPE, 0x3);
	rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
}
430
431
/* One segment of the by-rate TX power table layout. */
struct rtw89_byr_spec_ent_be {
	struct rtw89_rate_desc init;	/* first rate descriptor of the segment */
	u8 num_of_idx;			/* number of consecutive rate indices */
	bool no_over_bw40;		/* segment skipped when bw > 40MHz */
	bool no_multi_nss;		/* segment skipped when nss > 1 */
};
437
438
/* Segment layout of the by-rate TX power table, in CR order. Consumed by
 * __phy_set_txpwr_byrate_be(); segment order and sizes define the
 * register layout and must not be reordered.
 */
static const struct rtw89_byr_spec_ent_be rtw89_byr_spec_be[] = {
	{
		.init = { .rs = RTW89_RS_CCK },
		.num_of_idx = RTW89_RATE_CCK_NUM,
		.no_over_bw40 = true,
		.no_multi_nss = true,
	},
	{
		.init = { .rs = RTW89_RS_OFDM },
		.num_of_idx = RTW89_RATE_OFDM_NUM,
		.no_multi_nss = true,
	},
	{
		/* MCS 14/15, non-OFDMA */
		.init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_NON_OFDMA },
		.num_of_idx = 2,
		.no_multi_nss = true,
	},
	{
		/* MCS 14/15, OFDMA */
		.init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_OFDMA },
		.num_of_idx = 2,
		.no_multi_nss = true,
	},
	{
		/* MCS 0..13, non-OFDMA */
		.init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_NON_OFDMA },
		.num_of_idx = 14,
	},
	{
		.init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_NON_OFDMA },
		.num_of_idx = RTW89_RATE_HEDCM_NUM,
	},
	{
		/* MCS 0..13, OFDMA */
		.init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_OFDMA },
		.num_of_idx = 14,
	},
	{
		.init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_OFDMA },
		.num_of_idx = RTW89_RATE_HEDCM_NUM,
	},
};
477
478
/* Program the by-rate TX power table for one (band, bw, nss) combination.
 * Rates are read one at a time and packed four signed bytes per 32-bit
 * CR write. `pos` carries partial-fill state across table segments, so
 * the layout relies on the total entry count being a multiple of 4.
 */
static
void __phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev, u8 band, u8 bw,
			       u8 nss, u32 *addr, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_byr_spec_ent_be *ent;
	struct rtw89_rate_desc desc;
	int pos = 0;
	int i, j;
	u32 val;
	s8 v[4];

	for (i = 0; i < ARRAY_SIZE(rtw89_byr_spec_be); i++) {
		ent = &rtw89_byr_spec_be[i];

		/* some segments don't exist for wide bw or multi-NSS */
		if (bw > RTW89_CHANNEL_WIDTH_40 && ent->no_over_bw40)
			continue;
		if (nss > RTW89_NSS_1 && ent->no_multi_nss)
			continue;

		desc = ent->init;
		desc.nss = nss;
		for (j = 0; j < ent->num_of_idx; j++, desc.idx++) {
			v[pos] = rtw89_phy_read_txpwr_byrate(rtwdev, band, bw,
							     &desc);
			pos = (pos + 1) % 4;
			if (pos)
				continue;

			/* four bytes buffered: flush one CR word */
			val = u32_encode_bits(v[0], GENMASK(7, 0)) |
			      u32_encode_bits(v[1], GENMASK(15, 8)) |
			      u32_encode_bits(v[2], GENMASK(23, 16)) |
			      u32_encode_bits(v[3], GENMASK(31, 24));

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, *addr, val);
			*addr += 4;
		}
	}
}
516
517
/* Program the full by-rate TX power table. `addr` starts at
 * R_BE_PWR_BY_RATE and is advanced inside the helper, so the iteration
 * order (bw outer, nss inner) defines the register layout.
 */
static void rtw89_phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u32 addr = R_BE_PWR_BY_RATE;
	u8 band = chan->band_type;
	u8 bw, nss;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate on band %d\n", band);

	for (bw = 0; bw <= RTW89_CHANNEL_WIDTH_320; bw++)
		for (nss = 0; nss <= RTW89_NSS_2; nss++)
			__phy_set_txpwr_byrate_be(rtwdev, band, bw, nss,
						  &addr, phy_idx);
}
533
534
/* Program the per-rate-section TX power offsets (one 4-bit field each)
 * into R_BE_PWR_RATE_OFST_CTRL.
 */
static void rtw89_phy_set_txpwr_offset_be(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM_BE] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr offset on band %d\n", band);

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_BE; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);

	/* one nibble per rate section, CCK in the lowest */
	val = u32_encode_bits(v[RTW89_RATE_OFFSET_CCK], GENMASK(3, 0)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_OFDM], GENMASK(7, 4)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_HT], GENMASK(11, 8)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_VHT], GENMASK(15, 12)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_HE], GENMASK(19, 16)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_EHT], GENMASK(23, 20)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_HE], GENMASK(27, 24)) |
	      u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_EHT], GENMASK(31, 28));

	rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_BE_PWR_RATE_OFST_CTRL, val);
}
563
564
static void
565
fill_limit_nonbf_bf(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM],
566
u8 band, u8 bw, u8 ntx, u8 rs, u8 ch)
567
{
568
int bf;
569
570
for (bf = 0; bf < RTW89_BF_NUM; bf++)
571
(*ptr)[bf] = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, ntx,
572
rs, bf, ch);
573
}
574
575
static void
576
fill_limit_nonbf_bf_min(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM],
577
u8 band, u8 bw, u8 ntx, u8 rs, u8 ch1, u8 ch2)
578
{
579
s8 v1[RTW89_BF_NUM];
580
s8 v2[RTW89_BF_NUM];
581
int bf;
582
583
fill_limit_nonbf_bf(rtwdev, &v1, band, bw, ntx, rs, ch1);
584
fill_limit_nonbf_bf(rtwdev, &v2, band, bw, ntx, rs, ch2);
585
586
for (bf = 0; bf < RTW89_BF_NUM; bf++)
587
(*ptr)[bf] = min(v1[bf], v2[bf]);
588
}
589
590
/* 20MHz channel: every entry is centered on ch. */
static void phy_fill_limit_20m_be(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_be *lmt,
				  u8 band, u8 ntx, u8 ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch);
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch);
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, ch);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch);
}
603
604
/* 40MHz channel: the two 20MHz sub-channel centers sit at ch -/+ 2;
 * OFDM follows the primary 20MHz channel.
 */
static void phy_fill_limit_40m_be(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_be *lmt,
				  u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	/* CCK 20M limit is read at the lower 20MHz sub-channel */
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch);
}
623
624
/* 80MHz channel: 20MHz sub-channels at ch -6/-2/+2/+6, 40MHz sub-
 * channels at ch -/+ 4; the half-step 40MHz entry (_0p5) takes the
 * minimum over the two 40MHz sub-channels.
 */
static void phy_fill_limit_80m_be(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_be *lmt,
				  u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch);

	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 4, ch + 4);
}
650
651
/* 160MHz channel: 20MHz subs at ch -14..+14 step 4, 40MHz subs at
 * ch -12/-4/+4/+12, 80MHz subs at ch -/+ 8. The half-step entries
 * (_0p5, _2p5) take the minimum over adjacent 40MHz sub-channel pairs.
 */
static void phy_fill_limit_160m_be(struct rtw89_dev *rtwdev,
				   struct rtw89_txpwr_limit_be *lmt,
				   u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band,
			    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8);
	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band,
			    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band,
			    RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch);

	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 12, ch - 4);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch + 4, ch + 12);
}
699
700
/* 320MHz channel. Sub-channel centers follow regular strides from ch:
 * 16 x 20MHz at step 4, 8 x 40MHz at step 8, 4 x 80MHz at step 16,
 * 2 x 160MHz at step 32. OFDM follows the primary 20MHz channel, and
 * the half-step 40MHz entries take the minimum over adjacent 40MHz
 * sub-channel pairs.
 */
static void phy_fill_limit_320m_be(struct rtw89_dev *rtwdev,
				   struct rtw89_txpwr_limit_be *lmt,
				   u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	int i;

	fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band,
			    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch);

	/* 20MHz subs at ch - 30, -26, ..., +30 */
	for (i = 0; i < 16; i++)
		fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[i], band,
				    RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS,
				    ch - 30 + 4 * i);

	/* 40MHz subs at ch - 28, -20, ..., +28 */
	for (i = 0; i < 8; i++)
		fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[i], band,
				    RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				    ch - 28 + 8 * i);

	/* 80MHz subs at ch - 24, -8, +8, +24 */
	for (i = 0; i < 4; i++)
		fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[i], band,
				    RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS,
				    ch - 24 + 16 * i);

	/* 160MHz subs at ch - 16, +16 */
	for (i = 0; i < 2; i++)
		fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[i], band,
				    RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS,
				    ch - 16 + 32 * i);

	fill_limit_nonbf_bf(rtwdev, &lmt->mcs_320m, band,
			    RTW89_CHANNEL_WIDTH_320, ntx, RTW89_RS_MCS, ch);

	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 28, ch - 20);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch - 12, ch - 4);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_4p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch + 4, ch + 12);
	fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_6p5, band,
				RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS,
				ch + 20, ch + 28);
}
787
788
/* Build the TX power limit page for one channel / ntx combination.
 * lmt is zeroed first, so fields not applicable to the current
 * bandwidth remain 0.
 */
static void rtw89_phy_fill_limit_be(struct rtw89_dev *rtwdev,
				    const struct rtw89_chan *chan,
				    struct rtw89_txpwr_limit_be *lmt,
				    u8 ntx)
{
	u8 band = chan->band_type;
	u8 pri_ch = chan->primary_channel;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt, 0, sizeof(*lmt));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		phy_fill_limit_20m_be(rtwdev, lmt, band, ntx, ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		phy_fill_limit_40m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		phy_fill_limit_80m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		phy_fill_limit_160m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_320:
		phy_fill_limit_320m_be(rtwdev, lmt, band, ntx, ch, pri_ch);
		break;
	}
}
818
819
/* Program the TX power limit registers: one limit page per NSS, packed
 * four s8 entries per 32-bit CR word starting at R_BE_PWR_LMT.
 */
static void rtw89_phy_set_txpwr_limit_be(struct rtw89_dev *rtwdev,
					 const struct rtw89_chan *chan,
					 enum rtw89_phy_idx phy_idx)
{
	struct rtw89_txpwr_limit_be lmt;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	/* the struct is copied to HW byte-for-byte, so its size must
	 * match the register page exactly
	 */
	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_be) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE_BE);

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit on band %d bw %d\n",
		    chan->band_type, chan->band_width);

	addr = R_BE_PWR_LMT;
	for (i = 0; i <= RTW89_NSS_2; i++) {
		rtw89_phy_fill_limit_be(rtwdev, chan, &lmt, i);

		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_BE;
		     j += 4, addr += 4, ptr += 4) {
			val = u32_encode_bits(ptr[0], GENMASK(7, 0)) |
			      u32_encode_bits(ptr[1], GENMASK(15, 8)) |
			      u32_encode_bits(ptr[2], GENMASK(23, 16)) |
			      u32_encode_bits(ptr[3], GENMASK(31, 24));

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
851
852
/* Fill the index-th column of the RU limit page: RU26/52/106 and the
 * aggregated RU52_26 / RU106_26 entries for one 20MHz sub-channel.
 */
static void fill_limit_ru_each(struct rtw89_dev *rtwdev, u8 index,
			       struct rtw89_txpwr_limit_ru_be *lmt_ru,
			       u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, ntx, ch);
	lmt_ru->ru52[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52, ntx, ch);
	lmt_ru->ru106[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106, ntx, ch);
	lmt_ru->ru52_26[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52_26, ntx, ch);
	lmt_ru->ru106_26[index] =
		rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106_26, ntx, ch);
}
867
868
/* 20MHz: a single RU column at the center channel. */
static void phy_fill_limit_ru_20m_be(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_be *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch);
}
874
875
/* 40MHz: two RU columns, one per 20MHz sub-channel at ch -/+ 2. */
static void phy_fill_limit_ru_40m_be(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_be *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 2);
	fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch + 2);
}
882
883
/* 80MHz: four RU columns, one per 20MHz sub-channel at ch -6, -2, +2, +6. */
static void phy_fill_limit_ru_80m_be(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_be *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	int i;

	for (i = 0; i < 4; i++)
		fill_limit_ru_each(rtwdev, i, lmt_ru, band, ntx,
				   ch - 6 + 4 * i);
}
892
893
/* 160 MHz: eight 20 MHz subchannels spanning ch - 14 .. ch + 14
 * in steps of 4.
 */
static void phy_fill_limit_ru_160m_be(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_be *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	u8 i;

	for (i = 0; i < 8; i++)
		fill_limit_ru_each(rtwdev, i, lmt_ru, band, ntx,
				   ch - 14 + 4 * i);
}
/* 320 MHz: sixteen 20 MHz subchannels spanning ch - 30 .. ch + 30
 * in steps of 4.
 */
static void phy_fill_limit_ru_320m_be(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_be *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	u8 i;

	for (i = 0; i < 16; i++)
		fill_limit_ru_each(rtwdev, i, lmt_ru, band, ntx,
				   ch - 30 + 4 * i);
}
static void rtw89_phy_fill_limit_ru_be(struct rtw89_dev *rtwdev,
930
const struct rtw89_chan *chan,
931
struct rtw89_txpwr_limit_ru_be *lmt_ru,
932
u8 ntx)
933
{
934
u8 band = chan->band_type;
935
u8 ch = chan->channel;
936
u8 bw = chan->band_width;
937
938
memset(lmt_ru, 0, sizeof(*lmt_ru));
939
940
switch (bw) {
941
case RTW89_CHANNEL_WIDTH_20:
942
phy_fill_limit_ru_20m_be(rtwdev, lmt_ru, band, ntx, ch);
943
break;
944
case RTW89_CHANNEL_WIDTH_40:
945
phy_fill_limit_ru_40m_be(rtwdev, lmt_ru, band, ntx, ch);
946
break;
947
case RTW89_CHANNEL_WIDTH_80:
948
phy_fill_limit_ru_80m_be(rtwdev, lmt_ru, band, ntx, ch);
949
break;
950
case RTW89_CHANNEL_WIDTH_160:
951
phy_fill_limit_ru_160m_be(rtwdev, lmt_ru, band, ntx, ch);
952
break;
953
case RTW89_CHANNEL_WIDTH_320:
954
phy_fill_limit_ru_320m_be(rtwdev, lmt_ru, band, ntx, ch);
955
break;
956
}
957
}
958
959
static void rtw89_phy_set_txpwr_limit_ru_be(struct rtw89_dev *rtwdev,
960
const struct rtw89_chan *chan,
961
enum rtw89_phy_idx phy_idx)
962
{
963
struct rtw89_txpwr_limit_ru_be lmt_ru;
964
const s8 *ptr;
965
u32 addr, val;
966
u8 i, j;
967
968
BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_be) !=
969
RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE);
970
971
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
972
"[TXPWR] set txpwr limit ru on band %d bw %d\n",
973
chan->band_type, chan->band_width);
974
975
addr = R_BE_PWR_RU_LMT;
976
for (i = 0; i <= RTW89_NSS_2; i++) {
977
rtw89_phy_fill_limit_ru_be(rtwdev, chan, &lmt_ru, i);
978
979
ptr = (s8 *)&lmt_ru;
980
for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE;
981
j += 4, addr += 4, ptr += 4) {
982
val = u32_encode_bits(ptr[0], GENMASK(7, 0)) |
983
u32_encode_bits(ptr[1], GENMASK(15, 8)) |
984
u32_encode_bits(ptr[2], GENMASK(23, 16)) |
985
u32_encode_bits(ptr[3], GENMASK(31, 24));
986
987
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
988
}
989
}
990
}
991
992
const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
993
.cr_base = 0x20000,
994
.ccx = &rtw89_ccx_regs_be,
995
.physts = &rtw89_physts_regs_be,
996
.cfo = &rtw89_cfo_regs_be,
997
.phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
998
.config_bb_gain = rtw89_phy_config_bb_gain_be,
999
.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
1000
.bb_wrap_init = rtw89_phy_bb_wrap_init_be,
1001
.ch_info_init = rtw89_phy_ch_info_init_be,
1002
1003
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
1004
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
1005
.set_txpwr_limit = rtw89_phy_set_txpwr_limit_be,
1006
.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_be,
1007
};
1008
EXPORT_SYMBOL(rtw89_phy_gen_be);
1009
1010