GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/mvm/sta.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        if (iwl_mvm_has_new_rx_api(mvm) ||
            fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                return sizeof(struct iwl_mvm_add_sta_cmd);
        else
                return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
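
/*
 * Find a free station ID in the firmware's station table. The scan walks
 * fw_id_to_mac_id under mvm->mutex, skipping reserved IDs; for any interface
 * type other than a station interface, ID 0 is kept reserved, since the
 * d0i3/d3 flows assume the AP's station ID (on a station vif) is 0.
 */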
int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_INVALID_STA;
}
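
/*
 * Note on the math below: the value accumulated into agg_size is the A-MPDU
 * length exponent; per the 802.11 spec an exponent of N corresponds to a
 * maximum A-MPDU length of 2^(13 + N) - 1 octets, and the HE/EHT "extension"
 * fields are simply added on top of the base HT/VHT/6 GHz exponent.
 */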
/* Calculate the ampdu density and max size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
                               struct ieee80211_bss_conf *link_conf,
                               u32 *_agg_size)
{
        u32 agg_size = 0, mpdu_dens = 0;

        if (WARN_ON(!link_sta))
                return 0;

        /* Note that we always use only legacy & highest supported PPDUs, so
         * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
         * the maximum A-MPDU size of various PPDU types in different bands,
         * we only need to worry about the highest supported PPDU type here.
         */

        if (link_sta->ht_cap.ht_supported) {
                agg_size = link_sta->ht_cap.ampdu_factor;
                mpdu_dens = link_sta->ht_cap.ampdu_density;
        }

        if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
                /* overwrite HT values on 6 GHz */
                mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
                                          IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
                agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
                                         IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
        } else if (link_sta->vht_cap.vht_supported) {
                /* if VHT supported overwrite HT value */
                agg_size = u32_get_bits(link_sta->vht_cap.cap,
                                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
        }

        /* D6.0 10.12.2 A-MPDU length limit rules
         * A STA indicates the maximum length of the A-MPDU preEOF padding
         * that it can receive in an HE PPDU in the Maximum A-MPDU Length
         * Exponent field in its HT Capabilities, VHT Capabilities,
         * and HE 6 GHz Band Capabilities elements (if present) and the
         * Maximum AMPDU Length Exponent Extension field in its HE
         * Capabilities element
         */
        if (link_sta->he_cap.has_he)
                agg_size +=
                        u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
                                    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

        if (link_sta->eht_cap.has_eht)
                agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
                                        IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

        /* Limit to max A-MPDU supported by FW */
        agg_size = min_t(u32, agg_size,
                         STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

        *_agg_size = agg_size;
        return mpdu_dens;
}
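
/*
 * Per the ADD_STA firmware API, the uapsd_acs field carries the trigger-
 * enabled ACs in the low nibble and the delivery-enabled ACs in the high
 * nibble; with U-APSD the same ACs are both trigger- and delivery-enabled,
 * hence the nibble duplication in the return value below.
 */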
u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
        u8 uapsd_acs = 0;

        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                uapsd_acs |= BIT(AC_BK);
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                uapsd_acs |= BIT(AC_BE);
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                uapsd_acs |= BIT(AC_VI);
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                uapsd_acs |= BIT(AC_VO);

        return uapsd_acs | uapsd_acs << 4;
}
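
/*
 * For U-APSD peers below, sp_length is the service period length in frames:
 * mac80211's max_sp encodes half that value (1/2/3 -> 2/4/6 frames per
 * service period), and 0 means "deliver all buffered frames", which the
 * firmware takes as 128.
 */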
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->deflink.sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK |
                                                 STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                add_sta_cmd.station_type = mvm_sta->sta_type;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (!iwl_mvm_has_new_tx_api(mvm)) {
                        add_sta_cmd.tfd_queue_msk =
                                cpu_to_le32(mvm_sta->tfd_queue_msk);

                        if (flags & STA_MODIFY_QUEUES)
                                add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
                } else {
                        WARN_ON(flags & STA_MODIFY_QUEUES);
                }
        }

        switch (sta->deflink.bandwidth) {
        case IEEE80211_STA_RX_BW_320:
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                fallthrough;
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                fallthrough;
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                fallthrough;
        case IEEE80211_STA_RX_BW_20:
                if (sta->deflink.ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->deflink.rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->deflink.smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->deflink.ht_cap.ht_supported ||
            mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

        mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
                                               &mvm_sta->vif->bss_conf,
                                               &agg_size);
        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

        if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
                add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
                add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
                add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
        }

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}
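
/*
 * Timer callback for RX BA (aggregation) session inactivity: if frames were
 * received within twice the negotiated timeout, the timer is simply re-armed;
 * otherwise mac80211 is told the BA session timed out so it can tear the
 * session down.
 */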
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
        struct iwl_mvm_baid_data *data =
                timer_container_of(data, t, session_timer);
        struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;
        unsigned int sta_id;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

        /*
         * sta should be valid unless the following happens:
         * The firmware asserts which triggers a reconfig flow, but
         * the reconfig fails before we set the pointer to sta into
         * the fw_id_to_mac_id pointer table. Mac80211 can't stop
         * A-MPDU and hence the timer continues to run. Then, the
         * timer expires and sta is NULL.
         */
        if (IS_ERR_OR_NULL(sta))
                goto unlock;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_rx_ba_timer_expired(mvm_sta->vif,
                                      sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->deflink.sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                           iwl_mvm_add_sta_cmd_size(mvm),
                                           &cmd, &status);
}
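
/*
 * Free a TX queue that was assigned to a station/TID. On the new TX API
 * (TVQM) the queue itself is freed, after an optional SCD_QUEUE_CONFIG_CMD
 * remove command; on the older DQA path the TID is unmapped first and the
 * SCD queue is only disabled once no TIDs are left on it.
 */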
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                               int sta_id, u16 *queueptr, u8 tid)
{
        int queue = *queueptr;
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm)) {
                if (mvm->sta_remove_requires_queue_remove) {
                        u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
                                             SCD_QUEUE_CONFIG_CMD);
                        struct iwl_scd_queue_cfg_cmd remove_cmd = {
                                .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
                                .u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
                        };

                        if (tid == IWL_MAX_TID_COUNT)
                                tid = IWL_MGMT_TID;

                        remove_cmd.u.remove.tid = cpu_to_le32(tid);

                        ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
                                                   sizeof(remove_cmd),
                                                   &remove_cmd);
                } else {
                        ret = 0;
                }

                iwl_trans_txq_free(mvm->trans, queue);
                *queueptr = IWL_MVM_INVALID_QUEUE;

                return ret;
        }

        if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
                return 0;

        mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

        cmd.action = mvm->queue_info[queue].tid_bitmap ?
                SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
        if (cmd.action == SCD_CFG_DISABLE_QUEUE)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Disabling TXQ #%d tids=0x%x\n",
                            queue,
                            mvm->queue_info[queue].tid_bitmap);

        /* If the queue is still enabled - nothing left to do in this func */
        if (cmd.action == SCD_CFG_ENABLE_QUEUE)
                return 0;

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tid = mvm->queue_info[queue].txq_tid;

        /* Make sure queue info is correct even though we overwrite it */
        WARN(mvm->queue_info[queue].tid_bitmap,
             "TXQ #%d info out-of-sync - tids=0x%x\n",
             queue, mvm->queue_info[queue].tid_bitmap);

        /* If we are here - the queue is freed and we can zero out these vals */
        mvm->queue_info[queue].tid_bitmap = 0;

        if (sta) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);

                spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
                clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                spin_unlock_bh(&mvm->add_stream_lock);
        }

        /* Regardless if this is a reserved TXQ for a STA - mark it as false */
        mvm->queue_info[queue].reserved = false;

        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
                                   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

        if (ret)
                IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
                        queue, ret);
        return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);

                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

                spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
                clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                spin_unlock_bh(&mvm->add_stream_lock);
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        /*
         * The TX path may have been using this TXQ_ID from the tid_data,
         * so make sure it's no longer running so that we can safely reuse
         * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
         * above, but nothing guarantees we've stopped using them. Thus,
         * without this, we could get to iwl_mvm_disable_txq() and remove
         * the queue while still sending frames to it.
         */
        synchronize_net();

        return disable_agg_tids;
}
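
/*
 * Fully reclaim an inactive queue: unmap its TIDs, stop any aggregation
 * that was running on them, disable the queue itself and, if it is being
 * handed to a different station, tell the firmware to remove it from the
 * old station's queue set.
 */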
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
                                       struct ieee80211_sta *old_sta,
                                       u8 new_sta_id)
{
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id, tid;
        unsigned long disable_agg_tids = 0;
        bool same_sta;
        u16 queue_tmp = queue;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid = mvm->queue_info[queue].txq_tid;

        same_sta = sta_id == new_sta_id;

        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
        if (WARN_ON(!mvmsta))
                return -EINVAL;

        disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
        /* Disable the queue */
        if (disable_agg_tids)
                iwl_mvm_invalidate_sta_queue(mvm, queue,
                                             disable_agg_tids, false);

        ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
        if (ret) {
                IWL_ERR(mvm,
                        "Failed to free inactive queue %d (ret=%d)\n",
                        queue, ret);

                return ret;
        }

        /* If TXQ is allocated to another STA, update removal in FW */
        if (!same_sta)
                iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

        return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        /*
         * This protects us against grabbing a queue that's being reconfigured
         * by the inactivity checker.
         */
        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
                                int sta_id, int tid, int frame_limit, u16 ssn)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_ENABLE_QUEUE,
                .window = frame_limit,
                .sta_id = sta_id,
                .ssn = cpu_to_le16(ssn),
                .tx_fifo = fifo,
                .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                              queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
                .tid = tid,
        };
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
                 "Trying to reconfig unallocated queue %d\n", queue))
                return -ENXIO;

        IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
                  queue, fifo, ret);

        return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise, if no redirection is required, it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
                                  int ac, int ssn, unsigned int wdg_timeout,
                                  bool force, struct iwl_mvm_txq *txq)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        int ret;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -EINVAL;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop the queue and wait for it to empty */
        set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        mvm->queue_info[queue].txq_tid = tid;

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

        /* Update AC marking of the queue */
        mvm->queue_info[queue].mac80211_ac = ac;

        /*
         * Mark the queue as shared in the transport if it is shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication of shared
         * queues there.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the queue */
        clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

        return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
                                   u8 minq, u8 maxq)
{
        int i;

        lockdep_assert_held(&mvm->mutex);

        if (WARN(maxq >= mvm->trans->mac_cfg->base->num_of_queues,
                 "max queue %d >= num_of_queues (%d)", maxq,
                 mvm->trans->mac_cfg->base->num_of_queues))
                maxq = mvm->trans->mac_cfg->base->num_of_queues - 1;

        /* This should not be hit with new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return -ENOSPC;

        /* Start by looking for a free queue */
        for (i = minq; i <= maxq; i++)
                if (mvm->queue_info[i].tid_bitmap == 0 &&
                    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
                        return i;

        return -ENOSPC;
}
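
/*
 * Pick the TX queue size from the largest block-ack window the peer can use
 * on any of its links: EHT peers can use a 512-frame BA window, HE peers 256,
 * and anything else gets the default size.
 */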
static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
        int max_size = IWL_DEFAULT_QUEUE_SIZE;
        unsigned int link_id;

        /* this queue isn't used for traffic (cab_queue) */
        if (!sta)
                return IWL_MGMT_QUEUE_SIZE;

        rcu_read_lock();

        for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
                struct ieee80211_link_sta *link =
                        rcu_dereference(sta->link[link_id]);

                if (!link)
                        continue;

                /* support for 512 ba size */
                if (link->eht_cap.has_eht &&
                    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
                        max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

                /* support for 256 ba size */
                if (link->he_cap.has_he &&
                    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
                        max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
        }

        rcu_read_unlock();
        return max_size;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
                            struct ieee80211_sta *sta,
                            u8 sta_id, u8 tid, unsigned int timeout)
{
        int queue, size;
        u32 sta_mask = 0;

        if (tid == IWL_MAX_TID_COUNT) {
                tid = IWL_MGMT_TID;
                size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
                             mvm->trans->mac_cfg->base->min_txq_size);
        } else {
                size = iwl_mvm_get_queue_size(sta);
        }

        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                struct ieee80211_link_sta *link_sta;
                unsigned int link_id;

                rcu_read_lock();
                for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
                        struct iwl_mvm_link_sta *link =
                                rcu_dereference_protected(mvmsta->link[link_id],
                                                          lockdep_is_held(&mvm->mutex));

                        if (!link)
                                continue;

                        sta_mask |= BIT(link->sta_id);
                }
                rcu_read_unlock();
        } else {
                sta_mask |= BIT(sta_id);
        }

        if (!sta_mask)
                return -EINVAL;

        queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
                                    tid, size, timeout);

        if (queue >= 0)
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
                                    queue, sta_mask, tid);

        return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_txq *mvmtxq =
                iwl_mvm_txq_from_tid(sta, tid);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
        int queue = -1;

        lockdep_assert_held(&mvm->mutex);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating queue for sta %d on tid %d\n",
                            mvmsta->deflink.sta_id, tid);
        queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
                                        tid, wdg_timeout);
        if (queue < 0)
                return queue;

        mvmtxq->txq_id = queue;
        mvm->tvqm_info[queue].txq_tid = tid;
        mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

        IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        spin_unlock_bh(&mvmsta->lock);

        return 0;
}
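
/*
 * Record the queue <-> RA/TID mapping in the driver's bookkeeping.
 * Returns true only when the queue had no TIDs mapped before, i.e. when
 * the caller still needs to actually enable the queue in hardware.
 */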
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       int queue, u8 sta_id, u8 tid)
{
        bool enable_queue = true;

        /* Make sure this TID isn't already enabled */
        if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
                IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
                        queue, tid);
                return false;
        }

        /* Update mappings and refcounts */
        if (mvm->queue_info[queue].tid_bitmap)
                enable_queue = false;

        mvm->queue_info[queue].tid_bitmap |= BIT(tid);
        mvm->queue_info[queue].ra_sta_id = sta_id;

        if (enable_queue) {
                if (tid != IWL_MAX_TID_COUNT)
                        mvm->queue_info[queue].mac80211_ac =
                                tid_to_mac80211_ac[tid];
                else
                        mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

                mvm->queue_info[queue].txq_tid = tid;
        }

        if (sta) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);

                mvmtxq->txq_id = queue;
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Enabling TXQ #%d tids=0x%x\n",
                            queue, mvm->queue_info[queue].tid_bitmap);

        return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                               int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_ENABLE_QUEUE,
                .window = cfg->frame_limit,
                .sta_id = cfg->sta_id,
                .ssn = cpu_to_le16(ssn),
                .tx_fifo = cfg->fifo,
                .aggregate = cfg->aggregate,
                .tid = cfg->tid,
        };
        bool inc_ssn;

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return false;

        /* Send the enabling command if we need to */
        if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
                return false;

        inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
                                           NULL, wdg_timeout);
        if (inc_ssn)
                le16_add_cpu(&cmd.ssn, 1);

        WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
             "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

        return inc_ssn;
}
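
/*
 * Re-assign the "owner" TID of a shared queue in the firmware after the
 * previous owner was removed; any of the TIDs still mapped to the queue can
 * take ownership.
 */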
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret) {
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
                return;
        }

        mvm->queue_info[queue].txq_tid = tid;
        IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                            queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret = true;

        /* queue sharing is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return;

        lockdep_assert_held(&mvm->mutex);

        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_redirect_queue(mvm, queue, tid,
                                     tid_to_mac80211_ac[tid], ssn,
                                     wdg_timeout, true,
                                     iwl_mvm_txq_from_tid(sta, tid));
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->deflink.sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue TIDs are inactive - unmap them from the queue.
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
                                         struct iwl_mvm_sta *mvmsta, int queue,
                                         unsigned long tid_bitmap,
                                         unsigned long *unshare_queues,
                                         unsigned long *changetid_queues)
{
        unsigned int tid;

        lockdep_assert_held(&mvmsta->lock);
        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return false;

        /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                /* If some TFDs are still queued - don't mark TID as inactive */
                if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
                        tid_bitmap &= ~BIT(tid);

                /* Don't mark as inactive any TID that has an active BA */
                if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
                        tid_bitmap &= ~BIT(tid);
        }

        /* If all TIDs in the queue are inactive - return it can be reused */
        if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
                IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
                return true;
        }

        /*
         * If we are here, this is a shared queue and not all TIDs timed-out.
         * Remove the ones that did.
         */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                u16 q_tid_bitmap;

                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
                mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

                q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 *
                 * Mark this queue in the right bitmap, we'll send the command
                 * to the firmware later.
                 */
                if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
                        set_bit(queue, changetid_queues);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Removing inactive TID %d from shared Q:%d\n",
                                    tid, queue);
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "TXQ #%d left with tid bitmap 0x%x\n", queue,
                            mvm->queue_info[queue].tid_bitmap);

        /*
         * There may be different TIDs with the same mac queues, so make
         * sure all TIDs have existing corresponding mac queues enabled
         */
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;

        /* If the queue is marked as shared - "unshare" it */
        if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
                IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
                                    queue);
                set_bit(queue, unshare_queues);
        }

        return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
        unsigned long now = jiffies;
        unsigned long unshare_queues = 0;
        unsigned long changetid_queues = 0;
        int i, ret, free_queue = -ENOSPC;
        struct ieee80211_sta *queue_owner = NULL;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return -ENOSPC;

        rcu_read_lock();

        /* we skip the CMD queue below by starting at 1 */
        BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

        for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
                struct ieee80211_sta *sta;
                struct iwl_mvm_sta *mvmsta;
                u8 sta_id;
                int tid;
                unsigned long inactive_tid_bitmap = 0;
                unsigned long queue_tid_bitmap;

                queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
                if (!queue_tid_bitmap)
                        continue;

                /* If TXQ isn't in active use anyway - nothing to do here... */
                if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
                    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
                        continue;

                /* Check to see if there are inactive TIDs on this queue */
                for_each_set_bit(tid, &queue_tid_bitmap,
                                 IWL_MAX_TID_COUNT + 1) {
                        if (time_after(mvm->queue_info[i].last_frame_time[tid] +
                                       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
                                continue;

                        inactive_tid_bitmap |= BIT(tid);
                }

                /* If all TIDs are active - finish check on this queue */
                if (!inactive_tid_bitmap)
                        continue;

                /*
                 * If we are here - the queue hadn't been served recently and is
                 * in use
                 */

                sta_id = mvm->queue_info[i].ra_sta_id;
                sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

                /*
                 * If the STA doesn't exist anymore, it isn't an error. It could
                 * be that it was removed since getting the queues, and in this
                 * case it should've inactivated its queues anyway.
                 */
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);

                spin_lock_bh(&mvmsta->lock);
                ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
                                                   inactive_tid_bitmap,
                                                   &unshare_queues,
                                                   &changetid_queues);
                if (ret && free_queue < 0) {
                        queue_owner = sta;
                        free_queue = i;
                }
                /* only unlock sta lock - we still need the queue info lock */
                spin_unlock_bh(&mvmsta->lock);
        }

        /* Reconfigure queues requiring reconfiguration */
        for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
                iwl_mvm_unshare_queue(mvm, i);
        for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
                iwl_mvm_change_queue_tid(mvm, i);

        rcu_read_unlock();

        if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) {
                ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
                                                  alloc_for_sta);
                if (ret)
                        return ret;
        }

        return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
                .sta_id = mvmsta->deflink.sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
        int queue = -1;
        u16 queue_tmp;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false, inc_ssn;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_has_new_tx_api(mvm))
                return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
        spin_unlock_bh(&mvmsta->lock);

        if (tid == IWL_MAX_TID_COUNT) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                /* try harder - perhaps kill an inactive queue */
                queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if (queue > 0 && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the queue
         * as aggregatable.
         * Mark all DATA queues as allowing to be aggregated at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->deflink.sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

        /*
         * Mark the queue as shared in the transport if it is shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication of shared
         * queues there.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        /*
         * This looks racy, but it is not. We have only one packet for
         * this ra/tid in our Tx path since we stop the Qdisc when we
         * need to allocate a new TFD queue.
         */
        if (inc_ssn) {
                mvmsta->tid_data[tid].seq_number += 0x10;
                ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
        }
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
                                             wdg_timeout, false,
                                             iwl_mvm_txq_from_tid(sta, tid));
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        queue_tmp = queue;
        iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);

        return ret;
}
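
/*
 * Make sure a mac80211 TXQ has a hardware queue allocated and is marked
 * ready; also drop it from the deferred add-stream list since it no longer
 * needs the add-stream worker.
 */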
int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
                             struct ieee80211_txq *txq)
{
        struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
        int ret = -EINVAL;

        lockdep_assert_held(&mvm->mutex);

        if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
            !txq->sta) {
                return 0;
        }

        if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
                set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
                ret = 0;
        }

        local_bh_disable();
        spin_lock(&mvm->add_stream_lock);
        if (!list_empty(&mvmtxq->list))
                list_del_init(&mvmtxq->list);
        spin_unlock(&mvm->add_stream_lock);
        local_bh_enable();

        return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                           add_stream_wk);

        guard(mvm)(mvm);

        /* will reschedule to run after restart */
        if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ||
            test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                return;

        iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);

        while (!list_empty(&mvm->add_stream_txqs)) {
                struct iwl_mvm_txq *mvmtxq;
                struct ieee80211_txq *txq;
                u8 tid;

                mvmtxq = list_first_entry(&mvm->add_stream_txqs,
                                          struct iwl_mvm_txq, list);

                txq = container_of((void *)mvmtxq, struct ieee80211_txq,
                                   drv_priv);
                tid = txq->tid;
                if (tid == IEEE80211_NUM_TIDS)
                        tid = IWL_MAX_TID_COUNT;

                /*
                 * We can't really do much here, but if this fails we can't
                 * transmit anyway - so just don't transmit the frame etc.
                 * and let them back up ... we've tried our best to allocate
                 * a queue in the function itself.
                 */
                if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
                        spin_lock_bh(&mvm->add_stream_lock);
                        list_del_init(&mvmtxq->list);
                        spin_unlock_bh(&mvm->add_stream_lock);
                        continue;
                }

                /* now we're ready, any remaining races/concurrency will be
                 * handled in iwl_mvm_mac_itxq_xmit()
                 */
                set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

                local_bh_disable();
                spin_lock(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
                spin_unlock(&mvm->add_stream_lock);

                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
                local_bh_enable();
        }
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;

        /* queue reserving is disabled on new TX path */
        if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
                return 0;

        /* run the general cleanup/unsharing of queues */
        iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);

        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                /* try again - this time kick out a queue if needed */
                queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
                if (queue < 0) {
                        IWL_ERR(mvm, "No available queues for new station\n");
                        return -ENOSPC;
                }
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

        mvmsta->reserved_queue = queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->deflink.sta_id);

        return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                          struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg =
                iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);
        int i;
        struct iwl_trans_txq_scd_cfg cfg = {
                .sta_id = mvm_sta->deflink.sta_id,
                .frame_limit = IWL_FRAME_LIMIT,
        };

        /* Make sure reserved queue is still marked as such (if allocated) */
        if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
                mvm->queue_info[mvm_sta->reserved_queue].status =
                        IWL_MVM_QUEUE_RESERVED;

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
                int txq_id = tid_data->txq_id;
                int ac;

                if (txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;

                ac = tid_to_mac80211_ac[i];

                if (iwl_mvm_has_new_tx_api(mvm)) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d\n",
                                            mvm_sta->deflink.sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
                                                         mvm_sta->deflink.sta_id,
                                                         i, wdg);
                        /*
                         * on failures, just set it to IWL_MVM_INVALID_QUEUE
                         * to try again later, we have no other good way of
                         * failing here
                         */
                        if (txq_id < 0)
                                txq_id = IWL_MVM_INVALID_QUEUE;
                        tid_data->txq_id = txq_id;

                        /*
                         * Since we don't set the seq number after reset, and HW
                         * sets it now, FW reset will cause the seq num to start
                         * at 0 again, so the driver needs to update it
                         * internally as well to stay in sync with the real
                         * value.
                         */
                        tid_data->seq_number = 0;
                } else {
                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

                        cfg.tid = i;
                        cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                         txq_id ==
                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);

                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "Re-mapping sta %d tid %d to queue %d\n",
                                            mvm_sta->deflink.sta_id, i,
                                            txq_id);

                        iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
                        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
                }
        }
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      struct iwl_mvm_int_sta *sta,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
{
        struct iwl_mvm_add_sta_cmd cmd;
        int ret;
        u32 status = ADD_STA_SUCCESS;

        lockdep_assert_held(&mvm->mutex);

        memset(&cmd, 0, sizeof(cmd));
        cmd.sta_id = sta->sta_id;

        if (iwl_mvm_has_new_station_api(mvm->fw) &&
            sta->type == IWL_STA_AUX_ACTIVITY)
                cmd.mac_id_n_color = cpu_to_le32(mac_id);
        else
                cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                                     color));

        if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                cmd.station_type = sta->type;

        if (!iwl_mvm_has_new_tx_api(mvm))
                cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(0xffff);

        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);

        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Internal station added.\n");
                return 0;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
                        status);
                break;
        }
        return ret;
}

/* Initialize driver data of a new sta */
int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_rxq_dup_data *dup_data;
        int i, ret = 0;

        lockdep_assert_held(&mvm->mutex);

        mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                      mvmvif->color);
        mvm_sta->vif = vif;

        /* for MLD sta_id(s) should be allocated for each link before calling
         * this function
         */
        if (!mvm->mld_api_is_used) {
                if (WARN_ON(sta_id == IWL_INVALID_STA))
                        return -EINVAL;

                mvm_sta->deflink.sta_id = sta_id;
                rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);

                if (!mvm->trans->mac_cfg->gen2)
                        mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
                                LINK_QUAL_AGG_FRAME_LIMIT_DEF;
                else
                        mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
                                LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
        }

        mvm_sta->tt_tx_protection = false;
        mvm_sta->sta_type = sta_type;

        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                /*
                 * Mark all queues for this STA as unallocated and defer TX
                 * frames until the queue is allocated
                 */
                mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
        }

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_mac80211(sta->txq[i]);

                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                INIT_LIST_HEAD(&mvmtxq->list);
                atomic_set(&mvmtxq->tx_request, 0);
        }

        if (iwl_mvm_has_new_rx_api(mvm)) {
                int q;

                dup_data = kcalloc(mvm->trans->info.num_rxqs,
                                   sizeof(*dup_data), GFP_KERNEL);
                if (!dup_data)
                        return -ENOMEM;
                /*
                 * Initialize all the last_seq values to 0xffff which can never
                 * compare equal to the frame's seq_ctrl in the check in
                 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
                 * number and fragmented packets don't reach that function.
                 *
                 * This thus allows receiving a packet with seqno 0 and the
                 * retry bit set as the very first packet on a new TID.
                 */
                for (q = 0; q < mvm->trans->info.num_rxqs; q++)
                        memset(dup_data[q].last_seq, 0xff,
                               sizeof(dup_data[q].last_seq));
                mvm_sta->dup_data = dup_data;
        }

        if (!iwl_mvm_has_new_tx_api(mvm)) {
                ret = iwl_mvm_reserve_sta_stream(mvm, sta,
                                                 ieee80211_vif_type_p2p(vif));
                if (ret)
                        return ret;
        }

        /*
         * if rs is registered with mac80211, then "add station" will be handled
         * via the corresponding ops, otherwise need to notify rate scaling here
         */
        if (iwl_mvm_has_tlc_offload(mvm))
                iwl_mvm_rs_add_sta(mvm, mvm_sta);
        else
                spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);

        iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

        /* MPDUs are counted only when EMLSR is possible */
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            !sta->tdls && ieee80211_vif_is_mld(vif)) {
                mvm_sta->mpdu_counters =
                        kcalloc(mvm->trans->info.num_rxqs,
                                sizeof(*mvm_sta->mpdu_counters),
                                GFP_KERNEL);
                if (mvm_sta->mpdu_counters)
                        for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
                                spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
        }

        return 0;
}
1855
1856
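/*
 * Add a mac80211-owned station to the firmware: allocate a station ID
 * (or, on HW restart, reuse the existing one and re-allocate the queues)
 * and send the ADD_STA command.
 */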
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->deflink.sta_id;

	if (sta_id == IWL_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/* First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
	if (ret)
		goto err;

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA);
			mvmvif->deflink.ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

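/*
 * Tell the firmware to start/stop draining frames for this station:
 * toggles STA_FLG_DRAIN_FLOW via an ADD_STA modify command.
 */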
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->deflink.sta_id);
		break;
	default:
		ret = -EIO;
#if defined(__linux__)
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->deflink.sta_id);
#elif defined(__FreeBSD__)
		IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n",
			mvmsta->deflink.sta_id, status);
#endif
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
				    &mvm_sta->tid_data[i].txq_id, i);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		spin_lock_bh(&mvm->add_stream_lock);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		spin_unlock_bh(&mvm->add_stream_lock);
	}
}

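/* Wait until the transport has drained every TXQ still allocated to the STA */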
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

/* Execute the station-removal steps common to both MLD and non-MLD modes */
void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta,
		     struct ieee80211_link_sta *link_sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_vif_link_info *mvm_link =
		mvmvif->link[link_sta->link_id];
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_link_sta *mvm_link_sta;
	u8 sta_id;

	lockdep_assert_held(&mvm->mutex);

	mvm_link_sta =
		rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
					  lockdep_is_held(&mvm->mutex));
	sta_id = mvm_link_sta->sta_id;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvm_link->ap_sta_id == sta_id) {
		/* first remove remaining keys */
		iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link,
					  link_sta->link_id);

		mvm_link->ap_sta_id = IWL_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
				mvm_sta->tfd_queue_msk);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 mvm_sta->deflink.sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     u8 type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_INVALID_STA;
}

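/*
 * On devices without the new TX API, the aux/sniffer queue is mapped to
 * its FIFO through the shared SCD configuration before the station is
 * added; TVQM devices instead use the _tvqm helper below, after ADD_STA.
 */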
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->mac_cfg->base->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}

static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->mac_cfg->base->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}

static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
					    IWL_MAX_TID_COUNT);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;
	u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
		BIT(mvm->aux_queue);

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for aux activity,
	 * so repurpose the mac_id argument to pass lmac_id to the function.
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
			    &mvm->snif_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
			    &mvm->aux_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->deflink.bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			/* for queue management */
			mvm->probe_queue = queue;
			/* for use in TX */
			mvmvif->deflink.mgmt_queue = queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvm->p2p_dev_queue = queue;
		}
	} else if (vif->type == NL80211_IFTYPE_AP ||
		   vif->type == NL80211_IFTYPE_ADHOC) {
		/* set it for use in TX */
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
	}

	return 0;
}

void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
			  mvmvif->deflink.bcast_sta.tfd_queue_msk);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
			    queueptr, IWL_MAX_TID_COUNT);

	if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -EOPNOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
						    0, timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->deflink.cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg,
				   timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}

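/*
 * Remove a key from the firmware's tables: send ADD_STA_KEY with the
 * STA_KEY_NOT_VALID flag set, sized for the old or new command version.
 */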
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
			  mvmvif->deflink.mcast_sta.tfd_queue_msk);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
			    &mvmvif->deflink.cab_queue, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

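/* Synchronously notify all RX queues that the BAID was removed */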
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_data notif = {
		.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
					&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);

		spin_unlock_bh(&reorder_buf->lock);
	}
}

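/* Set up the per-queue reorder buffers, starting at the given SSN */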
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn)
{
	int i;

	for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}

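/*
 * Legacy BAID flow: start/stop an RX BA session via an ADD_STA modify
 * command; on start, the firmware returns the allocated BAID as part of
 * the command status.
 */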
static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  bool start, int tid, u16 ssn,
				  u16 buf_size)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.sta_id = mvm_sta->deflink.sta_id,
		.add_modify = STA_MODE_MODIFY,
	};
	u32 status;
	int ret;

	if (start) {
		cmd.add_immediate_ba_tid = tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
	} else {
		cmd.remove_immediate_ba_tid = tid;
		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
			return -EINVAL;
		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		return -ENOSPC;
	default:
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		return -EIO;
	}
}

static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  bool start, int tid, u16 ssn,
				  u16 buf_size, int baid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_SEND_IN_RFKILL,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	int ret;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	if (start) {
		cmd.alloc.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = cpu_to_le16(ssn);
		cmd.alloc.win_size = cpu_to_le16(buf_size);
		baid = -EIO;
	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
		cmd.remove_v1.baid = cpu_to_le32(baid);
		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
	} else {
		cmd.remove.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.remove.tid = cpu_to_le32(tid);
	}

	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
	if (ret)
		return ret;

	if (!start) {
		/* ignore firmware baid on remove */
		baid = 0;
	}

	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
		     start ? "start" : "stopp");

	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
		return -EINVAL;

	return baid;
}

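/*
 * Pick the BAID flow the firmware supports: the dedicated allocation
 * command when available, the legacy ADD_STA path otherwise.
 */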
static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      bool start, int tid, u16 ssn, u16 buf_size,
			      int baid)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
		return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
					      tid, ssn, buf_size, baid);

	return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
				      tid, ssn, buf_size);
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret, baid;
	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
							       IWL_MAX_BAID_OLD;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or if
		 * the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->info.num_rxqs *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
		baid = mvm_sta->tid_to_baid[tid];
	} else {
		/* we don't really need it in this case */
		baid = -1;
	}

	/* Don't send command to remove (start=0) BAID during restart */
	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
					  baid);

	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	if (start) {
		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
		baid_data->buf_size = buf_size;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->deflink.sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		timer_shutdown_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

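/*
 * Enable/disable TX aggregation on a TID by modifying the station's
 * tid_disable_tx mask (and queue mask on non-TVQM devices) via ADD_STA.
 */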
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

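/* Standard IEEE 802.11 TID (user priority) to access category mapping */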
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->mac_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -EOPNOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->deflink.sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
		min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
		    buf_size);
	mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
		mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->deflink.sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
						   BIT(tid)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
		u8 sta_id = mvmvif->deflink.ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

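/*
 * Compare two packet numbers stored least-significant-byte first;
 * returns >0, 0 or <0 like memcmp() on the big-endian values.
 */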
static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (pn1[i] > pn2[i])
			return 1;
		if (pn1[i] < pn2[i])
			return -1;
	}

	return 0;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
					    new_api ? 2 : 1);

	if (sta_id == IWL_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
		key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (api_ver >= 2) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
		i = 0;
	else
		i = -1;

	for (; i < IEEE80211_NUM_TIDS; i++) {
		struct ieee80211_key_seq seq = {};
		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
		int rx_pn_len = 8;
		/* there's a hole at 2/3 in FW format depending on version */
		int hole = api_ver >= 3 ? 0 : 2;

		ieee80211_get_key_rx_seq(key, i, &seq);

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			rx_pn[0] = seq.tkip.iv16;
			rx_pn[1] = seq.tkip.iv16 >> 8;
			rx_pn[2 + hole] = seq.tkip.iv32;
			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
			rx_pn = seq.hw.seq;
			rx_pn_len = seq.hw.seq_len;
		} else {
			rx_pn[0] = seq.ccmp.pn[0];
			rx_pn[1] = seq.ccmp.pn[1];
			rx_pn[2 + hole] = seq.ccmp.pn[2];
			rx_pn[3 + hole] = seq.ccmp.pn[3];
			rx_pn[4 + hole] = seq.ccmp.pn[4];
			rx_pn[5 + hole] = seq.ccmp.pn[5];
		}

		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
				   rx_pn_len) > 0)
			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
			       rx_pn_len);
	}

	if (api_ver >= 2) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
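		/*
		 * mac80211 stores the IPN big-endian (pn[0] is the most
		 * significant byte), so assemble the 48-bit counter with
		 * pn[5] as the least significant byte before converting
		 * the whole u64 to little endian for the firmware.
		 */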
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
		       remove_key ? "removing" : "installing",
		       keyconf->keyidx >= 6 ? "B" : "",
		       keyconf->keyidx, igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}


static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;
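
	/*
	 * No station given: for a client interface, fall back to the AP
	 * station's address. The only user of this helper is the TKIP
	 * path in __iwl_mvm_set_sta_key(), where the address must be
	 * the transmitter address of the frames we will receive.
	 */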
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
		u8 sta_id = mvmvif->deflink.ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
			return NULL;

		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->deflink.sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		if (!addr) {
			IWL_ERR(mvm, "Failed to find mac address\n");
			return -EINVAL;
		}

		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
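
		/*
		 * The phase-1 key depends only on the TA and the upper
		 * 32 bits of the TSC (iv32), so the firmware can do the
		 * per-packet phase-2 mixing itself; a fresh P1K is only
		 * needed when iv32 changes, which is what
		 * iwl_mvm_update_tkip_key() below handles.
		 */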
		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					    seq.tkip.iv32, p1k, 0, key_offset,
					    mfp);
	}

	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
				    0, NULL, 0, key_offset, mfp);
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->deflink.sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->deflink.sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
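	/*
	 * Each deletion ages all slots by one and resets the slot just
	 * freed to zero, so the offset with the highest counter is the
	 * one deleted longest ago. This lets the offset allocator
	 * (iwl_mvm_set_fw_key_idx()) prefer that offset, avoiding the
	 * immediate reuse of a just-freed slot.
	 */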

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
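	/*
	 * For example, a uAPSD session that negotiated TIDs 6 and 7
	 * (tids == 0xc0) marks only the voice AC awake here, since the
	 * tid_to_ucode_ac[] table collapses the eight TIDs onto the
	 * four access categories.
	 */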

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_BLOCK_TXQS,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta,
				   bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
		return;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
		return;
	}

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * If sta PS state is handled by mac80211, tell it to start/stop
	 * queuing tx for this station.
	 */
	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
							      disable);
		return;
	}

	rcu_read_lock();

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	rcu_read_unlock();

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.mcast_sta,
						  disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.bcast_sta,
						  disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);

	if (mvmsta)
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->mac_cfg->gen2)
		sn &= 0xff;

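	/*
	 * The queued count is the modular distance between the next SN
	 * to assign and the next SN to reclaim. E.g. with sn == 0x005
	 * and next_reclaimed == 0xffe, the subtraction wraps around the
	 * 12-bit SN space and correctly reports 7 MPDUs in flight.
	 */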
	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}

void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 id)
{
	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
		.id = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
				   CMD_ASYNC,
				   sizeof(cancel_channel_switch_cmd),
				   &cancel_channel_switch_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}

static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
					   u8 fw_sta_id)
{
	struct ieee80211_link_sta *link_sta =
		rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
	struct iwl_mvm_vif_link_info *link;

	if (WARN_ON_ONCE(!link_sta))
		return -EINVAL;

	link = mvmvif->link[link_sta->link_id];

	if (WARN_ON_ONCE(!link))
		return -EINVAL;

	return link->fw_link_id;
}

#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)

void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
			bool tx, int queue)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	struct iwl_mvm_tpt_counter *queue_counter;
	struct iwl_mvm_mpdu_counter *link_counter;
	u32 total_mpdus = 0;
	int fw_link_id;

	/* Count only for a BSS sta, and only when EMLSR is possible */
	if (!mvm_sta->mpdu_counters)
		return;

	/* Map sta id to link id */
	fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
	if (fw_link_id < 0)
		return;

	queue_counter = &mvm_sta->mpdu_counters[queue];
	link_counter = &queue_counter->per_link[fw_link_id];

	spin_lock_bh(&queue_counter->lock);

	if (tx)
		link_counter->tx += count;
	else
		link_counter->rx += count;

	/*
	 * When not in EMLSR, the window and the decision to enter EMLSR
	 * are handled during counting; when in EMLSR, they are handled
	 * in the statistics flow.
	 */
	if (mvmvif->esr_active)
		goto out;

	if (time_is_before_jiffies(queue_counter->window_start +
				   IWL_MVM_TPT_COUNT_WINDOW)) {
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));
		queue_counter->window_start = jiffies;

		IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
	}
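
	/*
	 * Within the current counting window, sum the MPDUs seen on all
	 * links; once the total crosses the EMLSR throughput threshold,
	 * kick the worker that may unblock EMLSR for this vif. The
	 * counters keep accumulating until the window above expires
	 * and resets them.
	 */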
	for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
		total_mpdus += tx ? queue_counter->per_link[i].tx :
				    queue_counter->per_link[i].rx;

	if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
		wiphy_work_queue(mvmvif->mvm->hw->wiphy,
				 &mvmvif->unblock_esr_tpt_wk);

out:
	spin_unlock_bh(&queue_counter->lock);
}