GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/mld/low_latency.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mld.h"
#include "iface.h"
#include "low_latency.h"
#include "hcmd.h"
#include "power.h"
#include "mlo.h"

#define MLD_LL_WK_INTERVAL_MSEC 500
#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)

/* packets per MLD_LL_PERIOD */
#define MLD_LL_ENABLE_THRESH 100

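/*
 * Worked example (assuming CONFIG_HZ=1000, which is not guaranteed):
 * MLD_LL_PERIOD = 1000 * 500 / 1000 = 500 jiffies, i.e. the 500 ms
 * evaluation interval, and MLD_LL_ACTIVE_WK_PERIOD = 10000 jiffies,
 * i.e. the 10 s hysteresis window used before disabling low latency.
 * Both expressions scale with HZ, so the wall-clock durations are the
 * same for any CONFIG_HZ.
 */
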
static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
				     unsigned long timestamp)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool global_low_latency = false;
	u8 num_rx_q = mld->trans->info.num_rxqs;

	for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
		u32 total_vo_vi_pkts = 0;
		bool ll_period_expired;

		/* If it's not initialized yet, it means we have not yet
		 * received/transmitted any vo/vi packet on this MAC.
		 */
		if (!ll->window_start[mac_id])
			continue;

		ll_period_expired =
			time_after(timestamp, ll->window_start[mac_id] +
				   MLD_LL_ACTIVE_WK_PERIOD);

		if (ll_period_expired)
			ll->window_start[mac_id] = timestamp;

		for (int q = 0; q < num_rx_q; q++) {
			struct iwl_mld_low_latency_packets_counters *counters =
				&mld->low_latency.pkts_counters[q];

			spin_lock_bh(&counters->lock);

			total_vo_vi_pkts += counters->vo_vi[mac_id];

			if (ll_period_expired)
				counters->vo_vi[mac_id] = 0;

			spin_unlock_bh(&counters->lock);
		}

		/* Enable immediately when there are enough packets, but
		 * defer disabling until the low-latency period has expired
		 * and the count is below the threshold.
		 */
		if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
			mld->low_latency.result[mac_id] = true;
		else if (ll_period_expired)
			mld->low_latency.result[mac_id] = false;

		global_low_latency |= mld->low_latency.result[mac_id];
	}

	return global_low_latency;
}

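/*
 * Illustrative timeline for the logic above: a burst of more than
 * MLD_LL_ENABLE_THRESH vo/vi packets sets result[mac_id] on the next
 * evaluation. When the traffic stops, nothing changes until the 10 s
 * window expires; on that evaluation the accumulated count is read,
 * the per-queue counters are reset, and a below-threshold count
 * clears result[mac_id] again.
 */
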
static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
	bool low_latency;

	if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
		return;

	low_latency = mld->low_latency.result[mld_vif->fw_id];

	if (prev != low_latency)
		iwl_mld_vif_update_low_latency(mld, vif, low_latency,
					       LOW_LATENCY_TRAFFIC);
}

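/*
 * Note: the iterator above toggles only the LOW_LATENCY_TRAFFIC cause
 * bit; any other cause bits a vif may have set are preserved, and the
 * firmware is only told about the change when the overall state flips
 * (see iwl_mld_vif_update_low_latency() below).
 */
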
static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   low_latency.work.work);
	unsigned long timestamp = jiffies;
	bool low_latency_active;

	if (mld->fw_status.in_hw_restart)
		return;

	/* It is assumed that the work was scheduled only after checking
	 * that at least MLD_LL_PERIOD has passed since the last update.
	 */

	low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);

	/* Update the timestamp now, after the low-latency calculation */
	mld->low_latency.timestamp = timestamp;

	/* If low-latency is active we need to force re-evaluation after
	 * 10 seconds, so that we can disable low-latency when
	 * the low-latency traffic ends.
	 *
	 * Otherwise, we don't need to run the work because there is nothing to
	 * disable.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever the
	 * MLD_LL_PERIOD timeout expires.
	 */
	if (low_latency_active)
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 MLD_LL_ACTIVE_WK_PERIOD);

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_low_latency_iter, mld);
}

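/*
 * Scheduling recap: iwl_mld_low_latency_update_counters() queues this
 * work with no delay once MLD_LL_PERIOD has elapsed since the last
 * update, so evaluation is traffic-driven; the work only re-arms
 * itself (with MLD_LL_ACTIVE_WK_PERIOD) while low latency is active,
 * so that it is eventually switched off even if traffic stops
 * completely.
 */
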
int iwl_mld_low_latency_init(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	unsigned long ts = jiffies;

	ll->pkts_counters = kcalloc(mld->trans->info.num_rxqs,
				    sizeof(*ll->pkts_counters), GFP_KERNEL);
	if (!ll->pkts_counters)
		return -ENOMEM;

	for (int q = 0; q < mld->trans->info.num_rxqs; q++)
		spin_lock_init(&ll->pkts_counters[q].lock);

	wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);

	ll->timestamp = ts;

	/* The low-latency window_start will be initialized per-MAC on
	 * the first vo/vi packet received/transmitted.
	 */

	return 0;
}

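/*
 * One counters struct is allocated per RX queue above, each with its
 * own spinlock, so RX paths running on different queues/CPUs never
 * contend on a shared counter; the work handler pays the cost of
 * summing across queues instead.
 */
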
void iwl_mld_low_latency_free(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;

	kfree(ll->pkts_counters);
	ll->pkts_counters = NULL;
}

void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;

	ll->timestamp = jiffies;

	memset(ll->window_start, 0, sizeof(ll->window_start));
	memset(ll->result, 0, sizeof(ll->result));

	for (int q = 0; q < mld->trans->info.num_rxqs; q++)
		memset(ll->pkts_counters[q].vo_vi, 0,
		       sizeof(ll->pkts_counters[q].vo_vi));
}

static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
					u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};
	u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
	int ret;

	if (low_latency) {
		/* Currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send low latency command\n");

	return ret;
}

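/*
 * The designated initializer above zeroes every field other than
 * mac_id, so when low_latency is false the command is sent with both
 * low_latency_rx and low_latency_tx cleared, i.e. it doubles as the
 * "disable" command.
 */
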
static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
					enum iwl_mld_low_latency_cause cause)
{
	if (set)
		mld_vif->low_latency_causes |= cause;
	else
		mld_vif->low_latency_causes &= ~cause;
}

void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    bool low_latency,
				    enum iwl_mld_low_latency_cause cause)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev;

	prev = iwl_mld_vif_low_latency(mld_vif);
	iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);

	low_latency = iwl_mld_vif_low_latency(mld_vif);
	if (low_latency == prev)
		return;

	if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
		/* revert to previous low-latency state */
		iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
		return;
	}

	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
		return;

	iwl_mld_update_mac_power(mld, vif, false);

	if (low_latency)
		iwl_mld_retry_emlsr(mld, vif);
}

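/*
 * Per-cause bookkeeping vs. firmware state: the cause bit is updated
 * first, and LOW_LATENCY_CMD is sent only when the OR of all causes
 * actually changes. On a command failure the cause bit is reverted so
 * driver state stays consistent with the firmware. The power/EMLSR
 * follow-up above applies only to P2P client interfaces.
 */
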
static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
{
	u8 tid;
	static const u8 tid_to_mac80211_ac[] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO,
	};

	if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
		return false;

	tid = ieee80211_get_tid(hdr);
	if (tid >= IWL_MAX_TID_COUNT)
		return false;

	return tid_to_mac80211_ac[tid] <= IEEE80211_AC_VI;
}

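/*
 * The table above is the standard 802.11 UP-to-AC mapping: TIDs 0/3
 * are best effort, 1/2 background, 4/5 video and 6/7 voice. Since
 * IEEE80211_AC_VO and IEEE80211_AC_VI are the two lowest enum values,
 * QoS data frames with TIDs 4-7 are the ones counted here.
 */
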
void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
					 struct ieee80211_hdr *hdr,
					 struct ieee80211_sta *sta,
					 u8 queue)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
	struct iwl_mld_low_latency_packets_counters *counters;
	unsigned long ts = jiffies ? jiffies : 1;
	u8 fw_id = mld_vif->fw_id;

	/* we should have failed op mode init if NULL */
	if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
		return;

	if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
			 queue >= mld->trans->info.num_rxqs))
		return;

	if (mld->low_latency.stopped)
		return;

	if (!iwl_mld_is_vo_vi_pkt(hdr))
		return;

	counters = &mld->low_latency.pkts_counters[queue];

	spin_lock_bh(&counters->lock);
	counters->vo_vi[fw_id]++;
	spin_unlock_bh(&counters->lock);

	/* Initialize the window_start on the first vo/vi packet */
	if (!mld->low_latency.window_start[fw_id])
		mld->low_latency.window_start[fw_id] = ts;

	if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 0);
}

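/*
 * The "jiffies ? jiffies : 1" above is deliberate: window_start == 0
 * means "no vo/vi traffic seen yet" (see iwl_mld_calc_low_latency()),
 * so the stored timestamp must never be 0 even if jiffies happens to
 * wrap to it.
 */
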
void iwl_mld_low_latency_stop(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	mld->low_latency.stopped = true;

	wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
}

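/*
 * iwl_mld_low_latency_stop() above and iwl_mld_low_latency_restart()
 * below bracket firmware restarts: stop freezes counting and cancels
 * the pending work; restart clears the windows and counters and, if
 * any MAC was left in low latency, queues a re-evaluation so the
 * state can be dropped even with no new traffic.
 */
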
void iwl_mld_low_latency_restart(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool low_latency = false;
	unsigned long ts = jiffies;

	lockdep_assert_wiphy(mld->wiphy);

	ll->timestamp = ts;
	mld->low_latency.stopped = false;

	for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		ll->window_start[mac] = 0;
		low_latency |= ll->result[mac];

		for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
			spin_lock_bh(&ll->pkts_counters[q].lock);
			ll->pkts_counters[q].vo_vi[mac] = 0;
			spin_unlock_bh(&ll->pkts_counters[q].lock);
		}
	}

	/* if low latency is active, force re-evaluation to cover the case of
	 * no traffic.
	 */
	if (low_latency)
		wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
}