GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/mld/mlo.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mlo.h"
#include "phy.h"

/* Block reasons helper */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)			\
	HOW(WOWLAN)			\
	HOW(ROC)			\
	HOW(NON_BSS)			\
	HOW(TMP_NON_BSS)		\
	HOW(TPT)

static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

/* Exit reasons helper */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)			\
	HOW(FW_REQUEST)			\
	HOW(INVALID)

static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

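/* Delayed work: lifts the PREVENTION block once the prevention period set by
 * iwl_mld_check_emlsr_prevention() has elapsed.
 */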
void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.prevent_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_PREVENTION);
}

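/* Delayed work: lifts the TMP_NON_BSS block after its timeout expires. */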
void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
				       struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.tmp_non_bss_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
}

#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE | \
					 IWL_MLD_EMLSR_EXIT_FW_REQUEST)
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)

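/* Track repeated EMLSR exits and, unless the exit was caused by a block,
 * arm the PREVENTION block for an escalating amount of time.
 */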
static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed between one
	 * exit and the other, or if we exited due to a different reason.
	 * Will also reset the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for a minimum of 30 seconds */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_INFO(mld,
		       "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		       delay / HZ, mld_vif->emlsr.exit_repeat_count,
		       iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}

static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
					     struct ieee80211_chanctx_conf *ctx,
					     void *dat)
{
	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);

	/* It is ok to do it for all chanctx (and not only for the ones that
	 * belong to the EMLSR vif) since EMLSR is not allowed if there is
	 * another vif.
	 */
	phy->avg_channel_load_not_by_us = 0;
}

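/* Drop to a single active link (link_to_keep), record the exit reason and
 * update the prevention state. Returns the result of the synchronous link
 * switch when sync is true, 0 otherwise.
 */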
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_INFO(mld,
		       "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		       iwl_mld_get_emlsr_exit_string(exit), exit,
		       vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}

void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}

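/* Add a block reason and, if it wasn't already set, leave EMLSR keeping only
 * link_to_keep active.
 */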
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_INFO(mld,
		       "Blocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}

void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}

int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}

#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT	(10 * HZ)

static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
						     struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
				       IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
				       iwl_mld_get_primary_link(vif));
	if (ret)
		return;

	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.tmp_non_bss_done_wk,
				 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
}

void iwl_mld_emlsr_block_tmp_non_bss(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
						NULL);
}

static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif);

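/* Clear a block reason; once no reasons remain, kick off an internal MLO scan,
 * after which link selection can re-enable EMLSR.
 */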
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_INFO(mld,
		       "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}

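/* Apply the firmware's ESR mode notification to one vif: exit EMLSR on
 * ESR_RECOMMEND_LEAVE; any other action is only logged/warned about.
 */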
static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	enum iwl_mvm_fw_esr_recommendation action;
	const struct iwl_esr_mode_notif *notif = NULL;

	if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
				    ESR_MODE_NOTIF, 0) > 1) {
		notif = (void *)data;
		action = le32_to_cpu(notif->action);
	} else {
		const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;

		action = le32_to_cpu(notif_v1->action);
	}

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (action) {
	case ESR_RECOMMEND_LEAVE:
		if (notif)
			IWL_DEBUG_INFO(mld_vif->mld,
				       "FW recommend leave reason = 0x%x\n",
				       le32_to_cpu(notif->leave_reason_mask));

		iwl_mld_exit_emlsr(mld_vif->mld, vif,
				   IWL_MLD_EMLSR_EXIT_FW_REQUEST,
				   iwl_mld_get_primary_link(vif));
		break;
	case ESR_FORCE_LEAVE:
		if (notif)
			IWL_DEBUG_INFO(mld_vif->mld,
				       "FW force leave reason = 0x%x\n",
				       le32_to_cpu(notif->leave_reason_mask));
		fallthrough;
	case ESR_RECOMMEND_ENTER:
	default:
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 action);
	}
}

void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}

static void
iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ieee80211_connection_loss(vif);
}

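/* Handle the firmware notification about a failed EMLSR link (de)activation:
 * disconnect when the failure leaves no consistent link state, otherwise fall
 * back to the single link the firmware reported as still valid.
 */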
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		       le32_to_cpu(notif->activation) ? "enter" : "exit",
		       bss_conf ? bss_conf->link_id : -1,
		       le32_to_cpu(notif->link_id),
		       le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}

/* Active non-station link tracking */
static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int *count = _data;

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		return;

	*count += iwl_mld_count_active_links(mld_vif->mld, vif);
}

struct iwl_mld_update_emlsr_block_data {
	bool block;
	int result;
};

static void
iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mld_update_emlsr_block_data *data = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (data->block) {
		ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
					       IWL_MLD_EMLSR_BLOCKED_NON_BSS,
					       iwl_mld_get_primary_link(vif));
		if (ret)
			data->result = ret;
	} else {
		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
				      IWL_MLD_EMLSR_BLOCKED_NON_BSS);
	}
}

int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation,
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last active non-station link, and if so unblock the bss vif.
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip updating it if the block state did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}

#define EMLSR_SEC_LINK_MIN_PERC 10
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400

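/* Periodic (IWL_MLD_TPT_COUNT_WINDOW) check of the MPDU counters: block EMLSR
 * when overall traffic is too low to justify it, and exit EMLSR when the
 * secondary link carries too small a share of the traffic.
 */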
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in an MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This work should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters, is done in the data path (to avoid running
	 * a work item every 5 seconds when idle; when the blocker is unset,
	 * we are not idle anyway).
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, need to check if the TPT criteria are still met.
	 *
	 * If EMLSR is active for at least 5 seconds, then we also
	 * need to check the secondary link requirements.
	 */
	if (iwl_mld_emlsr_active(vif) &&
	    time_is_before_jiffies(mld_vif->emlsr.last_entry_ts +
				   IWL_MLD_TPT_COUNT_WINDOW)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* Link IDs that don't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		       total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		return;

	IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		       sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}

void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}

/*
 * Link selection
 */

s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

#define RSSI_THRESHOLD(_low, _bw)			\
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ	\
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}

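/* Return a mask of exit reasons why this link, as primary or secondary,
 * cannot take part in EMLSR right now (BT coex on 2.4 GHz, low RSSI, CSA).
 */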
static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	u32 ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return IWL_MLD_EMLSR_EXIT_INVALID;

	if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_INFO(mld,
			       "Link %d is not allowed for EMLSR as %s\n",
			       link->link_id,
			       primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}

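/* Fill the link selection data for every usable link that was seen in the
 * last MLO scan. Returns the number of entries filled and stores the index
 * of the best-graded link in *best_link_idx.
 */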
static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	/*
	 * TODO: don't select links that weren't discovered in the last scan
	 * This requires mac80211 (or cfg80211) changes to forward/track when
	 * a BSS was last updated. cfg80211 already tracks this information but
	 * it is not exposed within the kernel.
	 */
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}

static u32
iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
{
	const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);

	switch (phy->chandef.width) {
	case NL80211_CHAN_WIDTH_320:
	case NL80211_CHAN_WIDTH_160:
		return 5;
	case NL80211_CHAN_WIDTH_80:
		return 7;
	default:
		break;
	}
	return 10;
}

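/* Decide, based on the channel load on the primary link, whether the a/b pair
 * is worth running in EMLSR: the larger the primary-to-secondary bandwidth
 * ratio, the more load is required before EMLSR is allowed.
 */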
static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = cfg80211_chandef_get_width(a->chandef);
	bw_b = cfg80211_chandef_get_width(b->chandef);
	ratio = bw_a / bw_b;

	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}

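/* Return a mask of exit reasons why links a (primary) and b (secondary) are
 * not a valid EMLSR pair, or 0 if the pair is acceptable.
 */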
VISIBLE_IF_IWLWIFI_KUNIT u32
iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
	if (reason_mask)
		return reason_mask;

	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
	if (reason_mask)
		return reason_mask;

	if (a->chandef->chan->band == b->chandef->chan->band) {
		const struct cfg80211_chan_def *c_low = a->chandef;
		const struct cfg80211_chan_def *c_high = b->chandef;
		u32 c_low_upper_edge, c_high_lower_edge;

		if (c_low->chan->center_freq > c_high->chan->center_freq)
			swap(c_low, c_high);

		c_low_upper_edge = c_low->chan->center_freq +
				   cfg80211_chandef_get_width(c_low) / 2;
		c_high_lower_edge = c_high->chan->center_freq -
				    cfg80211_chandef_get_width(c_high) / 2;

		if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
		    c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
			/* This case is fine - HW/FW can deal with it, there's
			 * enough separation between the two channels.
			 */
		} else {
			reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
		}
	}
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_INFO(mld,
			       "Links %d and %d are not a valid pair for EMLSR\n",
			       a->link_id, b->link_id);
		IWL_DEBUG_INFO(mld,
			       "Links bandwidth are: %d and %d\n",
			       nl80211_chan_width_to_mhz(a->chandef->width),
			       nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
	}

	return reason_mask;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);

/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (iwl_mld_emlsr_pair_state(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}

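/* Pick the active link set for the vif: grade every usable link that was seen
 * in the last MLO scan, then activate either the single best link or the best
 * valid EMLSR pair, whichever scores higher.
 */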
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		 "Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (!n_data) {
		IWL_DEBUG_EHT(mld,
			      "Couldn't find a valid grade for any link!\n");
		return;
	}

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * On a tie, prefer the (new) EMLSR combination, i.e.
			 * prefer EMLSR over a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
		       new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}

static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;

	_iwl_mld_select_links(mld, vif);
}

void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}

static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct ieee80211_bss_conf *link;
	unsigned int link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (!mld->bt_is_active) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	/* BT is turned ON but we are not in EMLSR, nothing to do */
	if (!iwl_mld_emlsr_active(vif))
		return;

	/* In EMLSR and BT is turned ON */

	for_each_vif_active_link(vif, link, link_id) {
		if (WARN_ON(!link->chanreq.oper.chan))
			continue;

		if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
			iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
					   iwl_mld_get_primary_link(vif));
			return;
		}
	}
}

void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}

struct iwl_mld_chan_load_data {
	struct iwl_mld_phy *phy;
	u32 prev_chan_load_not_by_us;
};

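/* Per-vif reaction to a channel load update on the primary link's channel
 * context: leave EMLSR when the load drops below the exit threshold, or retry
 * EMLSR when the load crosses one of the entry thresholds upwards.
 */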
static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}

void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
				   struct iwl_mld_phy *phy,
				   u32 prev_chan_load_not_by_us)
{
	struct iwl_mld_chan_load_data data = {
		.phy = phy,
		.prev_chan_load_not_by_us = prev_chan_load_not_by_us,
	};

	ieee80211_iterate_active_interfaces_mtx(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_chan_load_update_iter,
						&data);
}

void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif) ||
	    iwl_mld_emlsr_active(vif) || mld_vif->emlsr.blocked_reasons)
		return;

	iwl_mld_int_mlo_scan(mld, vif);
}

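/* Per-vif helper for iwl_mld_start/stop_ignoring_tpt_updates(): when starting
 * to ignore updates, cancel the periodic TPT check; when stopping, clear the
 * MPDU counters and re-arm the check for the next window.
 */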
static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	bool *start = (void *)data;

	/* check_tpt_wk is only used when TPT block isn't set */
	if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT ||
	    !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in an MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	if (*start) {
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);
		IWL_DEBUG_EHT(mld, "TPT check disabled\n");
		return;
	}

	/* Clear the counters so we start from the beginning */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	/* Schedule the check in 5 seconds */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
	IWL_DEBUG_EHT(mld, "TPT check enabled\n");
}

void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld)
{
	bool start = true;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_ignore_tpt_iter,
						&start);
}

void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
{
	bool start = false;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_ignore_tpt_iter,
						&start);
}