Path: blob/main/sys/contrib/dev/iwlwifi/mvm/sta.c
106821 views
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

/*
 * Find the first unused firmware station ID for the given interface type,
 * or IWL_INVALID_STA if all are taken. Caller must hold mvm->mutex.
 */
int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_INVALID_STA;
}

/* Calculate the ampdu density and max size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
			       struct ieee80211_bss_conf *link_conf,
			       u32 *_agg_size)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta))
		return 0;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
	 * the maximum A-MPDU size of various PPDU types in different bands,
	 * we only need to worry about the highest supported PPDU type here.
	 */

	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size = u32_get_bits(link_sta->vht_cap.cap,
					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU preEOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	if (link_sta->eht_cap.has_eht)
		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*_agg_size = agg_size;
	return mpdu_dens;
}

/*
 * Build the U-APSD AC bitmap for the ADD_STA command: low nibble = trigger-
 * enabled ACs, high nibble = delivery-enabled ACs (same set, duplicated).
 */
u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
	u8 uapsd_acs = 0;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		uapsd_acs |= BIT(AC_BK);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		uapsd_acs |= BIT(AC_BE);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		uapsd_acs |= BIT(AC_VI);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		uapsd_acs |= BIT(AC_VO);

	return uapsd_acs | uapsd_acs << 4;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->deflink.sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	/* Each wider bandwidth implies the narrower ones below it */
	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported ||
	    mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
					       &mvm_sta->vif->bss_conf,
					       &agg_size);
	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

/*
 * RX BA session inactivity timer: if traffic resumed, re-arm the timer;
 * otherwise tell mac80211 the BA session timed out.
 */
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		timer_container_of(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

/*
 * Disable (and on the new TX API, free) one TX queue. On success *queueptr
 * is set to IWL_MVM_INVALID_QUEUE on the new-API path. Caller must hold
 * mvm->mutex.
 */
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int sta_id, u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
			};

			if (tid == IWL_MAX_TID_COUNT)
				tid = IWL_MGMT_TID;

			remove_cmd.u.remove.tid = cpu_to_le32(tid);

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

/*
 * Return a bitmap of the queue's TIDs that currently have an active
 * aggregation (IWL_AGG_ON). Caller must hold mvm->mutex.
 */
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

/*
 * Fully release an inactive queue: unmap its TIDs from the old owner,
 * disable it, and if it belonged to a different station than the new
 * allocation target, tell the firmware to remove it from that station.
 */
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}

/*
 * Find a free (no TIDs mapped, status FREE) queue in [minq, maxq],
 * or -ENOSPC. Old TX path only; caller must hold mvm->mutex.
 */
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->mac_cfg->base->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->mac_cfg->base->num_of_queues))
		maxq = mvm->trans->mac_cfg->base->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

/*
 * Pick the TX queue size based on the station's highest-capability active
 * link (EHT > HE > default); NULL sta means the cab_queue.
 */
static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
	int max_size = IWL_DEFAULT_QUEUE_SIZE;
	unsigned int link_id;

	/* this queue isn't used for traffic (cab_queue) */
	if (!sta)
		return IWL_MGMT_QUEUE_SIZE;

	rcu_read_lock();

	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
		struct ieee80211_link_sta *link =
			rcu_dereference(sta->link[link_id]);

		if (!link)
			continue;

		/* support for 512 ba size */
		if (link->eht_cap.has_eht &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

		/* support for 256 ba size */
		if (link->he_cap.has_he &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	rcu_read_unlock();
	return max_size;
}

/*
 * Allocate a TVQM (new TX API) queue for the given station/TID via the
 * transport. Returns the queue number, or a negative error.
 */
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;
	u32 sta_mask = 0;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->mac_cfg->base->min_txq_size);
	} else {
		size = iwl_mvm_get_queue_size(sta);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_link_sta *link_sta;
		unsigned int link_id;

		rcu_read_lock();
		for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
			struct iwl_mvm_link_sta *link =
				rcu_dereference_protected(mvmsta->link[link_id],
							  lockdep_is_held(&mvm->mutex));

			if (!link)
				continue;

			sta_mask |= BIT(link->sta_id);
		}
		rcu_read_unlock();
	} else {
		sta_mask |= BIT(sta_id);
	}

	if (!sta_mask)
		return -EINVAL;

	queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
				    tid, size, timeout);

	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, sta_mask, tid);

	return queue;
}

/*
 * Allocate a TVQM queue for one station TID and record it in the mvm
 * bookkeeping and the station's tid_data. Caller must hold mvm->mutex.
 */
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->deflink.sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
					tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

/*
 * Map a TID onto a queue in the local bookkeeping. Returns %true if the
 * queue was previously unused and thus must be enabled in the hardware.
 */
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

/*
 * Enable a queue in hardware/firmware (old TX path). Returns %true if the
 * transport bumped the SSN, so the caller must also advance its counter.
 */
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

/*
 * Re-assign queue "ownership" to any TID still mapped on it and tell the
 * firmware, so the queue keeps a valid owner after TIDs were removed.
 */
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

/*
 * Turn a formerly-shared queue that is now down to a single TID back into
 * a dedicated queue: redirect it to that TID's AC and re-enable TX agg
 * on the TID if aggregation was active. Caller must hold mvm->mutex.
 */
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->deflink.sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1
*/1241BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);12421243for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {1244struct ieee80211_sta *sta;1245struct iwl_mvm_sta *mvmsta;1246u8 sta_id;1247int tid;1248unsigned long inactive_tid_bitmap = 0;1249unsigned long queue_tid_bitmap;12501251queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;1252if (!queue_tid_bitmap)1253continue;12541255/* If TXQ isn't in active use anyway - nothing to do here... */1256if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&1257mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)1258continue;12591260/* Check to see if there are inactive TIDs on this queue */1261for_each_set_bit(tid, &queue_tid_bitmap,1262IWL_MAX_TID_COUNT + 1) {1263if (time_after(mvm->queue_info[i].last_frame_time[tid] +1264IWL_MVM_DQA_QUEUE_TIMEOUT, now))1265continue;12661267inactive_tid_bitmap |= BIT(tid);1268}12691270/* If all TIDs are active - finish check on this queue */1271if (!inactive_tid_bitmap)1272continue;12731274/*1275* If we are here - the queue hadn't been served recently and is1276* in use1277*/12781279sta_id = mvm->queue_info[i].ra_sta_id;1280sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);12811282/*1283* If the STA doesn't exist anymore, it isn't an error. 
It could1284* be that it was removed since getting the queues, and in this1285* case it should've inactivated its queues anyway.1286*/1287if (IS_ERR_OR_NULL(sta))1288continue;12891290mvmsta = iwl_mvm_sta_from_mac80211(sta);12911292spin_lock_bh(&mvmsta->lock);1293ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,1294inactive_tid_bitmap,1295&unshare_queues,1296&changetid_queues);1297if (ret && free_queue < 0) {1298queue_owner = sta;1299free_queue = i;1300}1301/* only unlock sta lock - we still need the queue info lock */1302spin_unlock_bh(&mvmsta->lock);1303}130413051306/* Reconfigure queues requiring reconfiguation */1307for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)1308iwl_mvm_unshare_queue(mvm, i);1309for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)1310iwl_mvm_change_queue_tid(mvm, i);13111312rcu_read_unlock();13131314if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) {1315ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,1316alloc_for_sta);1317if (ret)1318return ret;1319}13201321return free_queue;1322}13231324static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,1325struct ieee80211_sta *sta, u8 ac, int tid)1326{1327struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);1328struct iwl_trans_txq_scd_cfg cfg = {1329.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),1330.sta_id = mvmsta->deflink.sta_id,1331.tid = tid,1332.frame_limit = IWL_FRAME_LIMIT,1333};1334unsigned int wdg_timeout =1335iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);1336int queue = -1;1337u16 queue_tmp;1338unsigned long disable_agg_tids = 0;1339enum iwl_mvm_agg_state queue_state;1340bool shared_queue = false, inc_ssn;1341int ssn;1342unsigned long tfd_queue_mask;1343int ret;13441345lockdep_assert_held(&mvm->mutex);13461347if (iwl_mvm_has_new_tx_api(mvm))1348return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);13491350spin_lock_bh(&mvmsta->lock);1351tfd_queue_mask = mvmsta->tfd_queue_msk;1352ssn = 
IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);1353spin_unlock_bh(&mvmsta->lock);13541355if (tid == IWL_MAX_TID_COUNT) {1356queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,1357IWL_MVM_DQA_MIN_MGMT_QUEUE,1358IWL_MVM_DQA_MAX_MGMT_QUEUE);1359if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)1360IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",1361queue);13621363/* If no such queue is found, we'll use a DATA queue instead */1364}13651366if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&1367(mvm->queue_info[mvmsta->reserved_queue].status ==1368IWL_MVM_QUEUE_RESERVED)) {1369queue = mvmsta->reserved_queue;1370mvm->queue_info[queue].reserved = true;1371IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);1372}13731374if (queue < 0)1375queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,1376IWL_MVM_DQA_MIN_DATA_QUEUE,1377IWL_MVM_DQA_MAX_DATA_QUEUE);1378if (queue < 0) {1379/* try harder - perhaps kill an inactive queue */1380queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);1381}13821383/* No free queue - we'll have to share */1384if (queue <= 0) {1385queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);1386if (queue > 0) {1387shared_queue = true;1388mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;1389}1390}13911392/*1393* Mark TXQ as ready, even though it hasn't been fully configured yet,1394* to make sure no one else takes it.1395* This will allow avoiding re-acquiring the lock at the end of the1396* configuration. 
On error we'll mark it back as free.1397*/1398if (queue > 0 && !shared_queue)1399mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;14001401/* This shouldn't happen - out of queues */1402if (WARN_ON(queue <= 0)) {1403IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",1404tid, cfg.sta_id);1405return queue;1406}14071408/*1409* Actual en/disablement of aggregations is through the ADD_STA HCMD,1410* but for configuring the SCD to send A-MPDUs we need to mark the queue1411* as aggregatable.1412* Mark all DATA queues as allowing to be aggregated at some point1413*/1414cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||1415queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);14161417IWL_DEBUG_TX_QUEUES(mvm,1418"Allocating %squeue #%d to sta %d on tid %d\n",1419shared_queue ? "shared " : "", queue,1420mvmsta->deflink.sta_id, tid);14211422if (shared_queue) {1423/* Disable any open aggs on this queue */1424disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);14251426if (disable_agg_tids) {1427IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",1428queue);1429iwl_mvm_invalidate_sta_queue(mvm, queue,1430disable_agg_tids, false);1431}1432}14331434inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);14351436/*1437* Mark queue as shared in transport if shared1438* Note this has to be done after queue enablement because enablement1439* can also set this value, and there is no indication there to shared1440* queues1441*/1442if (shared_queue)1443iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);14441445spin_lock_bh(&mvmsta->lock);1446/*1447* This looks racy, but it is not. 
We have only one packet for1448* this ra/tid in our Tx path since we stop the Qdisc when we1449* need to allocate a new TFD queue.1450*/1451if (inc_ssn) {1452mvmsta->tid_data[tid].seq_number += 0x10;1453ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;1454}1455mvmsta->tid_data[tid].txq_id = queue;1456mvmsta->tfd_queue_msk |= BIT(queue);1457queue_state = mvmsta->tid_data[tid].state;14581459if (mvmsta->reserved_queue == queue)1460mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;1461spin_unlock_bh(&mvmsta->lock);14621463if (!shared_queue) {1464ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);1465if (ret)1466goto out_err;14671468/* If we need to re-enable aggregations... */1469if (queue_state == IWL_AGG_ON) {1470ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);1471if (ret)1472goto out_err;1473}1474} else {1475/* Redirect queue, if needed */1476ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,1477wdg_timeout, false,1478iwl_mvm_txq_from_tid(sta, tid));1479if (ret)1480goto out_err;1481}14821483return 0;14841485out_err:1486queue_tmp = queue;1487iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);14881489return ret;1490}14911492int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,1493struct ieee80211_txq *txq)1494{1495struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);1496int ret = -EINVAL;14971498lockdep_assert_held(&mvm->mutex);14991500if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||1501!txq->sta) {1502return 0;1503}15041505if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {1506set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);1507ret = 0;1508}15091510local_bh_disable();1511spin_lock(&mvm->add_stream_lock);1512if (!list_empty(&mvmtxq->list))1513list_del_init(&mvmtxq->list);1514spin_unlock(&mvm->add_stream_lock);1515local_bh_enable();15161517return ret;1518}15191520void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)1521{1522struct iwl_mvm *mvm = container_of(wk, struct 
iwl_mvm,1523add_stream_wk);15241525guard(mvm)(mvm);15261527/* will reschedule to run after restart */1528if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ||1529test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))1530return;15311532iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);15331534while (!list_empty(&mvm->add_stream_txqs)) {1535struct iwl_mvm_txq *mvmtxq;1536struct ieee80211_txq *txq;1537u8 tid;15381539mvmtxq = list_first_entry(&mvm->add_stream_txqs,1540struct iwl_mvm_txq, list);15411542txq = container_of((void *)mvmtxq, struct ieee80211_txq,1543drv_priv);1544tid = txq->tid;1545if (tid == IEEE80211_NUM_TIDS)1546tid = IWL_MAX_TID_COUNT;15471548/*1549* We can't really do much here, but if this fails we can't1550* transmit anyway - so just don't transmit the frame etc.1551* and let them back up ... we've tried our best to allocate1552* a queue in the function itself.1553*/1554if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {1555spin_lock_bh(&mvm->add_stream_lock);1556list_del_init(&mvmtxq->list);1557spin_unlock_bh(&mvm->add_stream_lock);1558continue;1559}15601561/* now we're ready, any remaining races/concurrency will be1562* handled in iwl_mvm_mac_itxq_xmit()1563*/1564set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);15651566local_bh_disable();1567spin_lock(&mvm->add_stream_lock);1568list_del_init(&mvmtxq->list);1569spin_unlock(&mvm->add_stream_lock);15701571iwl_mvm_mac_itxq_xmit(mvm->hw, txq);1572local_bh_enable();1573}1574}15751576static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,1577struct ieee80211_sta *sta,1578enum nl80211_iftype vif_type)1579{1580struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);1581int queue;15821583/* queue reserving is disabled on new TX path */1584if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))1585return 0;15861587/* run the general cleanup/unsharing of queues */1588iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);15891590/* Make sure we have free resources for this STA */1591if (vif_type == 
NL80211_IFTYPE_STATION && !sta->tdls &&1592!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&1593(mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==1594IWL_MVM_QUEUE_FREE))1595queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;1596else1597queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,1598IWL_MVM_DQA_MIN_DATA_QUEUE,1599IWL_MVM_DQA_MAX_DATA_QUEUE);1600if (queue < 0) {1601/* try again - this time kick out a queue if needed */1602queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);1603if (queue < 0) {1604IWL_ERR(mvm, "No available queues for new station\n");1605return -ENOSPC;1606}1607}1608mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;16091610mvmsta->reserved_queue = queue;16111612IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",1613queue, mvmsta->deflink.sta_id);16141615return 0;1616}16171618/*1619* In DQA mode, after a HW restart the queues should be allocated as before, in1620* order to avoid race conditions when there are shared queues. 
This function1621* does the re-mapping and queue allocation.1622*1623* Note that re-enabling aggregations isn't done in this function.1624*/1625void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,1626struct ieee80211_sta *sta)1627{1628struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);1629unsigned int wdg =1630iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);1631int i;1632struct iwl_trans_txq_scd_cfg cfg = {1633.sta_id = mvm_sta->deflink.sta_id,1634.frame_limit = IWL_FRAME_LIMIT,1635};16361637/* Make sure reserved queue is still marked as such (if allocated) */1638if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)1639mvm->queue_info[mvm_sta->reserved_queue].status =1640IWL_MVM_QUEUE_RESERVED;16411642for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {1643struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];1644int txq_id = tid_data->txq_id;1645int ac;16461647if (txq_id == IWL_MVM_INVALID_QUEUE)1648continue;16491650ac = tid_to_mac80211_ac[i];16511652if (iwl_mvm_has_new_tx_api(mvm)) {1653IWL_DEBUG_TX_QUEUES(mvm,1654"Re-mapping sta %d tid %d\n",1655mvm_sta->deflink.sta_id, i);1656txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,1657mvm_sta->deflink.sta_id,1658i, wdg);1659/*1660* on failures, just set it to IWL_MVM_INVALID_QUEUE1661* to try again later, we have no other good way of1662* failing here1663*/1664if (txq_id < 0)1665txq_id = IWL_MVM_INVALID_QUEUE;1666tid_data->txq_id = txq_id;16671668/*1669* Since we don't set the seq number after reset, and HW1670* sets it now, FW reset will cause the seq num to start1671* at 0 again, so driver will need to update it1672* internally as well, so it keeps in sync with real val1673*/1674tid_data->seq_number = 0;1675} else {1676u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);16771678cfg.tid = i;1679cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);1680cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||1681txq_id ==1682IWL_MVM_DQA_BSS_CLIENT_QUEUE);16831684IWL_DEBUG_TX_QUEUES(mvm,1685"Re-mapping sta %d tid %d to 
queue %d\n",1686mvm_sta->deflink.sta_id, i,1687txq_id);16881689iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);1690mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;1691}1692}1693}16941695static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,1696struct iwl_mvm_int_sta *sta,1697const u8 *addr,1698u16 mac_id, u16 color)1699{1700struct iwl_mvm_add_sta_cmd cmd;1701int ret;1702u32 status = ADD_STA_SUCCESS;17031704lockdep_assert_held(&mvm->mutex);17051706memset(&cmd, 0, sizeof(cmd));1707cmd.sta_id = sta->sta_id;17081709if (iwl_mvm_has_new_station_api(mvm->fw) &&1710sta->type == IWL_STA_AUX_ACTIVITY)1711cmd.mac_id_n_color = cpu_to_le32(mac_id);1712else1713cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,1714color));17151716if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))1717cmd.station_type = sta->type;17181719if (!iwl_mvm_has_new_tx_api(mvm))1720cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);1721cmd.tid_disable_tx = cpu_to_le16(0xffff);17221723if (addr)1724memcpy(cmd.addr, addr, ETH_ALEN);17251726ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,1727iwl_mvm_add_sta_cmd_size(mvm),1728&cmd, &status);1729if (ret)1730return ret;17311732switch (status & IWL_ADD_STA_STATUS_MASK) {1733case ADD_STA_SUCCESS:1734IWL_DEBUG_INFO(mvm, "Internal station added.\n");1735return 0;1736default:1737ret = -EIO;1738IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",1739status);1740break;1741}1742return ret;1743}17441745/* Initialize driver data of a new sta */1746int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,1747struct ieee80211_sta *sta, int sta_id, u8 sta_type)1748{1749struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);1750struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);1751struct iwl_mvm_rxq_dup_data *dup_data;1752int i, ret = 0;17531754lockdep_assert_held(&mvm->mutex);17551756mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,1757mvmvif->color);1758mvm_sta->vif = vif;17591760/* for MLD 
sta_id(s) should be allocated for each link before calling1761* this function1762*/1763if (!mvm->mld_api_is_used) {1764if (WARN_ON(sta_id == IWL_INVALID_STA))1765return -EINVAL;17661767mvm_sta->deflink.sta_id = sta_id;1768rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);17691770if (!mvm->trans->mac_cfg->gen2)1771mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =1772LINK_QUAL_AGG_FRAME_LIMIT_DEF;1773else1774mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =1775LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;1776}17771778mvm_sta->tt_tx_protection = false;1779mvm_sta->sta_type = sta_type;17801781mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */17821783for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {1784/*1785* Mark all queues for this STA as unallocated and defer TX1786* frames until the queue is allocated1787*/1788mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;1789}17901791for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {1792struct iwl_mvm_txq *mvmtxq =1793iwl_mvm_txq_from_mac80211(sta->txq[i]);17941795mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;1796INIT_LIST_HEAD(&mvmtxq->list);1797atomic_set(&mvmtxq->tx_request, 0);1798}17991800if (iwl_mvm_has_new_rx_api(mvm)) {1801int q;18021803dup_data = kcalloc(mvm->trans->info.num_rxqs,1804sizeof(*dup_data), GFP_KERNEL);1805if (!dup_data)1806return -ENOMEM;1807/*1808* Initialize all the last_seq values to 0xffff which can never1809* compare equal to the frame's seq_ctrl in the check in1810* iwl_mvm_is_dup() since the lower 4 bits are the fragment1811* number and fragmented packets don't reach that function.1812*1813* This thus allows receiving a packet with seqno 0 and the1814* retry bit set as the very first packet on a new TID.1815*/1816for (q = 0; q < mvm->trans->info.num_rxqs; q++)1817memset(dup_data[q].last_seq, 0xff,1818sizeof(dup_data[q].last_seq));1819mvm_sta->dup_data = dup_data;1820}18211822if (!iwl_mvm_has_new_tx_api(mvm)) {1823ret = iwl_mvm_reserve_sta_stream(mvm, sta,1824ieee80211_vif_type_p2p(vif));1825if 
(ret)1826return ret;1827}18281829/*1830* if rs is registered with mac80211, then "add station" will be handled1831* via the corresponding ops, otherwise need to notify rate scaling here1832*/1833if (iwl_mvm_has_tlc_offload(mvm))1834iwl_mvm_rs_add_sta(mvm, mvm_sta);1835else1836spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);18371838iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);18391840/* MPDUs are counted only when EMLSR is possible */1841if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&1842!sta->tdls && ieee80211_vif_is_mld(vif)) {1843mvm_sta->mpdu_counters =1844kcalloc(mvm->trans->info.num_rxqs,1845sizeof(*mvm_sta->mpdu_counters),1846GFP_KERNEL);1847if (mvm_sta->mpdu_counters)1848for (int q = 0; q < mvm->trans->info.num_rxqs; q++)1849spin_lock_init(&mvm_sta->mpdu_counters[q].lock);1850}18511852return 0;1853}18541855int iwl_mvm_add_sta(struct iwl_mvm *mvm,1856struct ieee80211_vif *vif,1857struct ieee80211_sta *sta)1858{1859struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);1860struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);1861int ret, sta_id;1862bool sta_update = false;1863unsigned int sta_flags = 0;18641865lockdep_assert_held(&mvm->mutex);18661867if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))1868sta_id = iwl_mvm_find_free_sta_id(mvm,1869ieee80211_vif_type_p2p(vif));1870else1871sta_id = mvm_sta->deflink.sta_id;18721873if (sta_id == IWL_INVALID_STA)1874return -ENOSPC;18751876spin_lock_init(&mvm_sta->lock);18771878/* if this is a HW restart re-alloc existing queues */1879if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {1880struct iwl_mvm_int_sta tmp_sta = {1881.sta_id = sta_id,1882.type = mvm_sta->sta_type,1883};18841885/* First add an empty station since allocating1886* a queue requires a valid station1887*/1888ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,1889mvmvif->id, mvmvif->color);1890if (ret)1891goto err;18921893iwl_mvm_realloc_queues_after_restart(mvm, sta);1894sta_update = 
true;1895sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;1896goto update_fw;1897}18981899ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,1900sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);1901if (ret)1902goto err;19031904update_fw:1905ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);1906if (ret)1907goto err;19081909if (vif->type == NL80211_IFTYPE_STATION) {1910if (!sta->tdls) {1911WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA);1912mvmvif->deflink.ap_sta_id = sta_id;1913} else {1914WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA);1915}1916}19171918rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);19191920return 0;19211922err:1923return ret;1924}19251926int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,1927bool drain)1928{1929struct iwl_mvm_add_sta_cmd cmd = {};1930int ret;1931u32 status;19321933lockdep_assert_held(&mvm->mutex);19341935cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);1936cmd.sta_id = mvmsta->deflink.sta_id;1937cmd.add_modify = STA_MODE_MODIFY;1938cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;1939cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);19401941status = ADD_STA_SUCCESS;1942ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,1943iwl_mvm_add_sta_cmd_size(mvm),1944&cmd, &status);1945if (ret)1946return ret;19471948switch (status & IWL_ADD_STA_STATUS_MASK) {1949case ADD_STA_SUCCESS:1950IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",1951mvmsta->deflink.sta_id);1952break;1953default:1954ret = -EIO;1955#if defined(__linux__)1956IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",1957mvmsta->deflink.sta_id);1958#elif defined(__FreeBSD__)1959IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n",1960mvmsta->deflink.sta_id, status);1961#endif1962break;1963}19641965return ret;1966}19671968/*1969* Remove a station from the FW table. 
Before sending the command to remove1970* the station validate that the station is indeed known to the driver (sanity1971* only).1972*/1973static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)1974{1975struct ieee80211_sta *sta;1976struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {1977.sta_id = sta_id,1978};1979int ret;19801981sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],1982lockdep_is_held(&mvm->mutex));19831984/* Note: internal stations are marked as error values */1985if (!sta) {1986IWL_ERR(mvm, "Invalid station id\n");1987return -EINVAL;1988}19891990ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,1991sizeof(rm_sta_cmd), &rm_sta_cmd);1992if (ret) {1993IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);1994return ret;1995}19961997return 0;1998}19992000static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,2001struct ieee80211_vif *vif,2002struct ieee80211_sta *sta)2003{2004struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);2005int i;20062007lockdep_assert_held(&mvm->mutex);20082009for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {2010if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)2011continue;20122013iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,2014&mvm_sta->tid_data[i].txq_id, i);2015mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;2016}20172018for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {2019struct iwl_mvm_txq *mvmtxq =2020iwl_mvm_txq_from_mac80211(sta->txq[i]);20212022spin_lock_bh(&mvm->add_stream_lock);2023mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;2024list_del_init(&mvmtxq->list);2025clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);2026spin_unlock_bh(&mvm->add_stream_lock);2027}2028}20292030int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,2031struct iwl_mvm_sta *mvm_sta)2032{2033int i;20342035for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {2036u16 txq_id;2037int ret;20382039spin_lock_bh(&mvm_sta->lock);2040txq_id = mvm_sta->tid_data[i].txq_id;2041spin_unlock_bh(&mvm_sta->lock);20422043if 
(txq_id == IWL_MVM_INVALID_QUEUE)2044continue;20452046ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);2047if (ret)2048return ret;2049}20502051return 0;2052}20532054/* Execute the common part for both MLD and non-MLD modes.2055* Returns if we're done with removing the station, either2056* with error or success2057*/2058void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,2059struct ieee80211_sta *sta,2060struct ieee80211_link_sta *link_sta)2061{2062struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);2063struct iwl_mvm_vif_link_info *mvm_link =2064mvmvif->link[link_sta->link_id];2065struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);2066struct iwl_mvm_link_sta *mvm_link_sta;2067u8 sta_id;20682069lockdep_assert_held(&mvm->mutex);20702071mvm_link_sta =2072rcu_dereference_protected(mvm_sta->link[link_sta->link_id],2073lockdep_is_held(&mvm->mutex));2074sta_id = mvm_link_sta->sta_id;20752076if (vif->type == NL80211_IFTYPE_STATION &&2077mvm_link->ap_sta_id == sta_id) {2078/* first remove remaining keys */2079iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link,2080link_sta->link_id);20812082mvm_link->ap_sta_id = IWL_INVALID_STA;2083}20842085/*2086* This shouldn't happen - the TDLS channel switch should be canceled2087* before the STA is removed.2088*/2089if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {2090mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;2091cancel_delayed_work(&mvm->tdls_cs.dwork);2092}2093}20942095int iwl_mvm_rm_sta(struct iwl_mvm *mvm,2096struct ieee80211_vif *vif,2097struct ieee80211_sta *sta)2098{2099struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);2100int ret;21012102lockdep_assert_held(&mvm->mutex);21032104ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);2105if (ret)2106return ret;21072108/* flush its queues here since we are freeing mvm_sta */2109ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,2110mvm_sta->tfd_queue_msk);2111if (ret)2112return ret;2113if (iwl_mvm_has_new_tx_api(mvm)) {2114ret = 
iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);2115} else {2116u32 q_mask = mvm_sta->tfd_queue_msk;21172118ret = iwl_trans_wait_tx_queues_empty(mvm->trans,2119q_mask);2120}2121if (ret)2122return ret;21232124ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);21252126iwl_mvm_disable_sta_queues(mvm, vif, sta);21272128/* If there is a TXQ still marked as reserved - free it */2129if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {2130u8 reserved_txq = mvm_sta->reserved_queue;2131enum iwl_mvm_queue_status *status;21322133/*2134* If no traffic has gone through the reserved TXQ - it2135* is still marked as IWL_MVM_QUEUE_RESERVED, and2136* should be manually marked as free again2137*/2138status = &mvm->queue_info[reserved_txq].status;2139if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&2140(*status != IWL_MVM_QUEUE_FREE),2141"sta_id %d reserved txq %d status %d",2142mvm_sta->deflink.sta_id, reserved_txq, *status))2143return -EINVAL;21442145*status = IWL_MVM_QUEUE_FREE;2146}21472148iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink);21492150ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);2151RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);21522153return ret;2154}21552156int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,2157struct ieee80211_vif *vif,2158u8 sta_id)2159{2160int ret = iwl_mvm_rm_sta_common(mvm, sta_id);21612162lockdep_assert_held(&mvm->mutex);21632164RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);2165return ret;2166}21672168int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,2169struct iwl_mvm_int_sta *sta,2170u32 qmask, enum nl80211_iftype iftype,2171u8 type)2172{2173if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||2174sta->sta_id == IWL_INVALID_STA) {2175sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);2176if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))2177return -ENOSPC;2178}21792180sta->tfd_queue_msk = qmask;2181sta->type = type;21822183/* put a non-NULL value so iterating over the stations won't stop 
*/2184RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));2185return 0;2186}21872188void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)2189{2190RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);2191memset(sta, 0, sizeof(struct iwl_mvm_int_sta));2192sta->sta_id = IWL_INVALID_STA;2193}21942195static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,2196u8 sta_id, u8 fifo)2197{2198unsigned int wdg_timeout =2199mvm->trans->mac_cfg->base->wd_timeout;2200struct iwl_trans_txq_scd_cfg cfg = {2201.fifo = fifo,2202.sta_id = sta_id,2203.tid = IWL_MAX_TID_COUNT,2204.aggregate = false,2205.frame_limit = IWL_FRAME_LIMIT,2206};22072208WARN_ON(iwl_mvm_has_new_tx_api(mvm));22092210iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);2211}22122213static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)2214{2215unsigned int wdg_timeout =2216mvm->trans->mac_cfg->base->wd_timeout;22172218WARN_ON(!iwl_mvm_has_new_tx_api(mvm));22192220return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,2221wdg_timeout);2222}22232224static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,2225int maccolor, u8 *addr,2226struct iwl_mvm_int_sta *sta,2227u16 *queue, int fifo)2228{2229int ret;22302231/* Map queue to fifo - needs to happen before adding station */2232if (!iwl_mvm_has_new_tx_api(mvm))2233iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);22342235ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);2236if (ret) {2237if (!iwl_mvm_has_new_tx_api(mvm))2238iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,2239IWL_MAX_TID_COUNT);2240return ret;2241}22422243/*2244* For 22000 firmware and on we cannot add queue to a station unknown2245* to firmware so enable queue here - after the station was added2246*/2247if (iwl_mvm_has_new_tx_api(mvm)) {2248int txq;22492250txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);2251if (txq < 0) 
{
			/* roll back the ADD_STA on queue allocation failure */
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}

/*
 * Add the auxiliary station used for driver-internal (off-channel/aux)
 * activity and assign the aux queue to it.
 *
 * @mvm: the mvm component
 * @lmac_id: which LMAC to use on CDB NICs (passed via the mac_id argument)
 */
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;
	u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
		BIT(mvm->aux_queue);

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for aux activity
	 * using the mac_id argument place to send lmac_id to the function
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}

/* Add the sniffer (monitor) station for the given vif. */
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}

/* Disable the sniffer queue and remove the sniffer station from FW. */
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
			    &mvm->snif_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

/* Disable the aux queue, remove the aux station from FW and free it. */
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
			    &mvm->aux_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}

/* Free the driver-side sniffer station entry. */
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->deflink.bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		/* pre-TVQM: pick the shared probe/p2p queue by vif type */
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	/* in IBSS the "broadcast" station uses the BSSID address */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id,
mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			/* roll back the ADD_STA on queue allocation failure */
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			/* for queue management */
			mvm->probe_queue = queue;
			/* for use in TX */
			mvmvif->deflink.mgmt_queue = queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvm->p2p_dev_queue = queue;
		}
	} else if (vif->type == NL80211_IFTYPE_AP ||
		   vif->type == NL80211_IFTYPE_ADHOC) {
		/* set it for use in TX */
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
	}

	return 0;
}

/*
 * Flush and disable the TX queue used by the vif's broadcast station and
 * drop it from the station's tfd_queue_msk (pre-TVQM only).
 */
void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
			  mvmvif->deflink.bcast_sta.tfd_queue_msk);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
			    queueptr, IWL_MAX_TID_COUNT);

	if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

/* Allocate the driver-side broadcast station entry for this vif. */
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

/* Free the driver-side broadcast station entry for this vif. */
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm,
vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -EOPNOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
						    0, timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->deflink.cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg,
				   timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}

/*
 * Ask the firmware to forget a key at a given offset for a station.
 * Only the common command header fields are filled; the v1/new layouts
 * share those at the same offsets, hence the union trick below.
 */
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
			  mvmvif->deflink.mcast_sta.tfd_queue_msk);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
			    &mvmvif->deflink.cab_queue, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

static void
iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)2711{2712struct iwl_mvm_delba_data notif = {2713.baid = baid,2714};27152716iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,2717¬if, sizeof(notif));2718};27192720static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,2721struct iwl_mvm_baid_data *data)2722{2723int i;27242725iwl_mvm_sync_rxq_del_ba(mvm, data->baid);27262727for (i = 0; i < mvm->trans->info.num_rxqs; i++) {2728int j;2729struct iwl_mvm_reorder_buffer *reorder_buf =2730&data->reorder_buf[i];2731struct iwl_mvm_reorder_buf_entry *entries =2732&data->entries[i * data->entries_per_queue];27332734spin_lock_bh(&reorder_buf->lock);2735if (likely(!reorder_buf->num_stored)) {2736spin_unlock_bh(&reorder_buf->lock);2737continue;2738}27392740/*2741* This shouldn't happen in regular DELBA since the internal2742* delBA notification should trigger a release of all frames in2743* the reorder buffer.2744*/2745WARN_ON(1);27462747for (j = 0; j < data->buf_size; j++)2748__skb_queue_purge(&entries[j].frames);27492750spin_unlock_bh(&reorder_buf->lock);2751}2752}27532754static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,2755struct iwl_mvm_baid_data *data,2756u16 ssn)2757{2758int i;27592760for (i = 0; i < mvm->trans->info.num_rxqs; i++) {2761struct iwl_mvm_reorder_buffer *reorder_buf =2762&data->reorder_buf[i];2763struct iwl_mvm_reorder_buf_entry *entries =2764&data->entries[i * data->entries_per_queue];2765int j;27662767reorder_buf->num_stored = 0;2768reorder_buf->head_sn = ssn;2769spin_lock_init(&reorder_buf->lock);2770reorder_buf->queue = i;2771reorder_buf->valid = false;2772for (j = 0; j < data->buf_size; j++)2773__skb_queue_head_init(&entries[j].frames);2774}2775}27762777static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,2778struct ieee80211_sta *sta,2779bool start, int tid, u16 ssn,2780u16 buf_size)2781{2782struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);2783struct iwl_mvm_add_sta_cmd cmd = {2784.mac_id_n_color = 
cpu_to_le32(mvm_sta->mac_id_n_color),2785.sta_id = mvm_sta->deflink.sta_id,2786.add_modify = STA_MODE_MODIFY,2787};2788u32 status;2789int ret;27902791if (start) {2792cmd.add_immediate_ba_tid = tid;2793cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);2794cmd.rx_ba_window = cpu_to_le16(buf_size);2795cmd.modify_mask = STA_MODIFY_ADD_BA_TID;2796} else {2797cmd.remove_immediate_ba_tid = tid;2798cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;2799}28002801status = ADD_STA_SUCCESS;2802ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,2803iwl_mvm_add_sta_cmd_size(mvm),2804&cmd, &status);2805if (ret)2806return ret;28072808switch (status & IWL_ADD_STA_STATUS_MASK) {2809case ADD_STA_SUCCESS:2810IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",2811start ? "start" : "stopp");2812if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&2813!(status & IWL_ADD_STA_BAID_VALID_MASK)))2814return -EINVAL;2815return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);2816case ADD_STA_IMMEDIATE_BA_FAILURE:2817IWL_WARN(mvm, "RX BA Session refused by fw\n");2818return -ENOSPC;2819default:2820IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",2821start ? "start" : "stopp", status);2822return -EIO;2823}2824}28252826static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,2827struct ieee80211_sta *sta,2828bool start, int tid, u16 ssn,2829u16 buf_size, int baid)2830{2831struct iwl_rx_baid_cfg_cmd cmd = {2832.action = start ? 
cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
			  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_SEND_IN_RFKILL,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	int ret;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	if (start) {
		cmd.alloc.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = cpu_to_le16(ssn);
		cmd.alloc.win_size = cpu_to_le16(buf_size);
		/* overwritten by the FW response on success */
		baid = -EIO;
	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
		cmd.remove_v1.baid = cpu_to_le32(baid);
		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
	} else {
		cmd.remove.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.remove.tid = cpu_to_le32(tid);
	}

	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
	if (ret)
		return ret;

	if (!start) {
		/* ignore firmware baid on remove */
		baid = 0;
	}

	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
		     start ? "start" : "stopp");

	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
		return -EINVAL;

	return baid;
}

/* Dispatch to the BAID command flavor the firmware supports. */
static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      bool start, int tid, u16 ssn, u16 buf_size,
			      int baid)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
		return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
					      tid, ssn, buf_size, baid);

	return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
				      tid, ssn, buf_size);
}

/*
 * Start or stop an RX aggregation (BA) session for @sta/@tid.
 * Allocates the per-queue reorder buffers up front, asks the firmware for
 * a BAID, and publishes the session data under RCU in mvm->baid_map.
 */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret, baid;
	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
							       IWL_MAX_BAID_OLD;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or if
		 * the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->info.num_rxqs *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
		baid = mvm_sta->tid_to_baid[tid];
	} else {
		/* we don't really need it in this case */
		baid = -1;
	}

	/* Don't send command to remove (start=0) BAID during restart */
	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
					  baid);

	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	if (start) {
		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
		baid_data->buf_size = buf_size;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->deflink.sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		timer_shutdown_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

/*
 * Enable/disable TX aggregation for @tid on @queue via ADD_STA.
 * Updates the driver-side tfd_queue_msk/tid_disable_agg state first.
 */
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta
*mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

/* Map TID (0-8) to a mac80211 access category. */
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

/* Map TID (0-7) to a firmware (ucode) access category. */
static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

/*
 * mac80211 ampdu_action(IEEE80211_AMPDU_TX_START) handler: pick (or
 * reserve) a TX queue for the session and report the starting SSN.
 */
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2.
The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->mac_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

/*
 * mac80211 ampdu_action(IEEE80211_AMPDU_TX_OPERATIONAL) handler: switch
 * the session's TX queue to aggregation mode, (re)configuring the SCD
 * window if needed, and push the rate-scaling update to the firmware.
 */
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it will be offloaded to firmware..
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -EOPNOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->deflink.sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
		min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
		    buf_size);
	mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
		mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
}

/* Return a still-reserved (never used) agg queue to the free pool. */
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

/*
 * mac80211 ampdu_action(IEEE80211_AMPDU_TX_STOP_CONT) handler: cleanly
 * stop a TX agg session and notify mac80211 when done.
 */
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->deflink.sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

/*
 * mac80211 ampdu_action(IEEE80211_AMPDU_TX_STOP_FLUSH) handler: abort a
 * TX agg session, flushing any pending frames on its queue.
 */
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
						   BIT(tid)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

/*
 * Find a free firmware key offset, preferring the slot that has been
 * unused for the longest time. Returns STA_KEY_IDX_INVALID if the key
 * table is full.
 */
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station.
If we have no3515* station ID, then use AP's station ID.3516*/3517if (vif->type == NL80211_IFTYPE_STATION &&3518mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {3519u8 sta_id = mvmvif->deflink.ap_sta_id;35203521sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],3522lockdep_is_held(&mvm->mutex));35233524/*3525* It is possible that the 'sta' parameter is NULL,3526* for example when a GTK is removed - the sta_id will then3527* be the AP ID, and no station was passed by mac80211.3528*/3529if (IS_ERR_OR_NULL(sta))3530return NULL;35313532return iwl_mvm_sta_from_mac80211(sta);3533}35343535return NULL;3536}35373538static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)3539{3540int i;35413542for (i = len - 1; i >= 0; i--) {3543if (pn1[i] > pn2[i])3544return 1;3545if (pn1[i] < pn2[i])3546return -1;3547}35483549return 0;3550}35513552static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,3553u32 sta_id,3554struct ieee80211_key_conf *key, bool mcast,3555u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,3556u8 key_offset, bool mfp)3557{3558union {3559struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;3560struct iwl_mvm_add_sta_key_cmd cmd;3561} u = {};3562__le16 key_flags;3563int ret;3564u32 status;3565u16 keyidx;3566u64 pn = 0;3567int i, size;3568bool new_api = fw_has_api(&mvm->fw->ucode_capa,3569IWL_UCODE_TLV_API_TKIP_MIC_KEYS);3570int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,3571new_api ? 
2 : 1);35723573if (sta_id == IWL_INVALID_STA)3574return -EINVAL;35753576keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &3577STA_KEY_FLG_KEYID_MSK;3578key_flags = cpu_to_le16(keyidx);3579key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);35803581if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)3582key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);35833584switch (key->cipher) {3585case WLAN_CIPHER_SUITE_TKIP:3586key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);3587if (api_ver >= 2) {3588memcpy((void *)&u.cmd.tx_mic_key,3589&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],3590IWL_MIC_KEY_SIZE);35913592memcpy((void *)&u.cmd.rx_mic_key,3593&key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],3594IWL_MIC_KEY_SIZE);3595pn = atomic64_read(&key->tx_pn);35963597} else {3598u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;3599for (i = 0; i < 5; i++)3600u.cmd_v1.tkip_rx_ttak[i] =3601cpu_to_le16(tkip_p1k[i]);3602}3603memcpy(u.cmd.common.key, key->key, key->keylen);3604break;3605case WLAN_CIPHER_SUITE_CCMP:3606key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);3607memcpy(u.cmd.common.key, key->key, key->keylen);3608if (api_ver >= 2)3609pn = atomic64_read(&key->tx_pn);3610break;3611case WLAN_CIPHER_SUITE_WEP104:3612key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);3613fallthrough;3614case WLAN_CIPHER_SUITE_WEP40:3615key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);3616memcpy(u.cmd.common.key + 3, key->key, key->keylen);3617break;3618case WLAN_CIPHER_SUITE_GCMP_256:3619key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);3620fallthrough;3621case WLAN_CIPHER_SUITE_GCMP:3622key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);3623memcpy(u.cmd.common.key, key->key, key->keylen);3624if (api_ver >= 2)3625pn = atomic64_read(&key->tx_pn);3626break;3627default:3628key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);3629memcpy(u.cmd.common.key, key->key, key->keylen);3630}36313632if (mcast)3633key_flags |= cpu_to_le16(STA_KEY_MULTICAST);3634if (mfp)3635key_flags |= cpu_to_le16(STA_KEY_MFP);36363637u.cmd.common.key_offset = 
key_offset;3638u.cmd.common.key_flags = key_flags;3639u.cmd.common.sta_id = sta_id;36403641if (key->cipher == WLAN_CIPHER_SUITE_TKIP)3642i = 0;3643else3644i = -1;36453646for (; i < IEEE80211_NUM_TIDS; i++) {3647struct ieee80211_key_seq seq = {};3648u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;3649int rx_pn_len = 8;3650/* there's a hole at 2/3 in FW format depending on version */3651int hole = api_ver >= 3 ? 0 : 2;36523653ieee80211_get_key_rx_seq(key, i, &seq);36543655if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {3656rx_pn[0] = seq.tkip.iv16;3657rx_pn[1] = seq.tkip.iv16 >> 8;3658rx_pn[2 + hole] = seq.tkip.iv32;3659rx_pn[3 + hole] = seq.tkip.iv32 >> 8;3660rx_pn[4 + hole] = seq.tkip.iv32 >> 16;3661rx_pn[5 + hole] = seq.tkip.iv32 >> 24;3662} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {3663rx_pn = seq.hw.seq;3664rx_pn_len = seq.hw.seq_len;3665} else {3666rx_pn[0] = seq.ccmp.pn[0];3667rx_pn[1] = seq.ccmp.pn[1];3668rx_pn[2 + hole] = seq.ccmp.pn[2];3669rx_pn[3 + hole] = seq.ccmp.pn[3];3670rx_pn[4 + hole] = seq.ccmp.pn[4];3671rx_pn[5 + hole] = seq.ccmp.pn[5];3672}36733674if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,3675rx_pn_len) > 0)3676memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,3677rx_pn_len);3678}36793680if (api_ver >= 2) {3681u.cmd.transmit_seq_cnt = cpu_to_le64(pn);3682size = sizeof(u.cmd);3683} else {3684size = sizeof(u.cmd_v1);3685}36863687status = ADD_STA_SUCCESS;3688if (cmd_flags & CMD_ASYNC)3689ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,3690&u.cmd);3691else3692ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,3693&u.cmd, &status);36943695switch (status) {3696case ADD_STA_SUCCESS:3697IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");3698break;3699default:3700ret = -EIO;3701IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");3702break;3703}37043705return ret;3706}37073708static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,3709struct ieee80211_key_conf *keyconf,3710u8 sta_id, bool 
remove_key)3711{3712struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};37133714/* verify the key details match the required command's expectations */3715if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||3716(keyconf->keyidx != 4 && keyconf->keyidx != 5 &&3717keyconf->keyidx != 6 && keyconf->keyidx != 7) ||3718(keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&3719keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&3720keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))3721return -EINVAL;37223723if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&3724keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))3725return -EINVAL;37263727igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);3728igtk_cmd.sta_id = cpu_to_le32(sta_id);37293730if (remove_key) {3731/* This is a valid situation for IGTK */3732if (sta_id == IWL_INVALID_STA)3733return 0;37343735igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);3736} else {3737struct ieee80211_key_seq seq;3738const u8 *pn;37393740switch (keyconf->cipher) {3741case WLAN_CIPHER_SUITE_AES_CMAC:3742igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);3743break;3744case WLAN_CIPHER_SUITE_BIP_GMAC_128:3745case WLAN_CIPHER_SUITE_BIP_GMAC_256:3746igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);3747break;3748default:3749return -EINVAL;3750}37513752memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);3753if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)3754igtk_cmd.ctrl_flags |=3755cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);3756ieee80211_get_key_rx_seq(keyconf, 0, &seq);3757pn = seq.aes_cmac.pn;3758igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |3759((u64) pn[4] << 8) |3760((u64) pn[3] << 16) |3761((u64) pn[2] << 24) |3762((u64) pn[1] << 32) |3763((u64) pn[0] << 40));3764}37653766IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",3767remove_key ? "removing" : "installing",3768keyconf->keyidx >= 6 ? 
"B" : "",3769keyconf->keyidx, igtk_cmd.sta_id);37703771if (!iwl_mvm_has_new_rx_api(mvm)) {3772struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {3773.ctrl_flags = igtk_cmd.ctrl_flags,3774.key_id = igtk_cmd.key_id,3775.sta_id = igtk_cmd.sta_id,3776.receive_seq_cnt = igtk_cmd.receive_seq_cnt3777};37783779memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,3780ARRAY_SIZE(igtk_cmd_v1.igtk));3781return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,3782sizeof(igtk_cmd_v1), &igtk_cmd_v1);3783}3784return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,3785sizeof(igtk_cmd), &igtk_cmd);3786}378737883789static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,3790struct ieee80211_vif *vif,3791struct ieee80211_sta *sta)3792{3793struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);37943795if (sta)3796return sta->addr;37973798if (vif->type == NL80211_IFTYPE_STATION &&3799mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {3800u8 sta_id = mvmvif->deflink.ap_sta_id;3801sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],3802lockdep_is_held(&mvm->mutex));3803if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))3804return NULL;38053806return sta->addr;3807}380838093810return NULL;3811}38123813static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,3814struct ieee80211_vif *vif,3815struct ieee80211_sta *sta,3816struct ieee80211_key_conf *keyconf,3817u8 key_offset,3818bool mcast)3819{3820const u8 *addr;3821struct ieee80211_key_seq seq;3822u16 p1k[5];3823u32 sta_id;3824bool mfp = false;38253826if (sta) {3827struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);38283829sta_id = mvm_sta->deflink.sta_id;3830mfp = sta->mfp;3831} else if (vif->type == NL80211_IFTYPE_AP &&3832!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {3833struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);38343835sta_id = mvmvif->deflink.mcast_sta.sta_id;3836} else {3837IWL_ERR(mvm, "Failed to find station id\n");3838return -EINVAL;3839}38403841if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {3842addr = 
iwl_mvm_get_mac_addr(mvm, vif, sta);3843if (!addr) {3844IWL_ERR(mvm, "Failed to find mac address\n");3845return -EINVAL;3846}38473848/* get phase 1 key from mac80211 */3849ieee80211_get_key_rx_seq(keyconf, 0, &seq);3850ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);38513852return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,3853seq.tkip.iv32, p1k, 0, key_offset,3854mfp);3855}38563857return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,38580, NULL, 0, key_offset, mfp);3859}38603861int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,3862struct ieee80211_vif *vif,3863struct ieee80211_sta *sta,3864struct ieee80211_key_conf *keyconf,3865u8 key_offset)3866{3867bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);3868struct iwl_mvm_sta *mvm_sta;3869u8 sta_id = IWL_INVALID_STA;3870int ret;3871static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};38723873lockdep_assert_held(&mvm->mutex);38743875if (vif->type != NL80211_IFTYPE_AP ||3876keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {3877/* Get the station id from the mvm local station table */3878mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);3879if (!mvm_sta) {3880IWL_ERR(mvm, "Failed to find station\n");3881return -EINVAL;3882}3883sta_id = mvm_sta->deflink.sta_id;38843885/*3886* It is possible that the 'sta' parameter is NULL, and thus3887* there is a need to retrieve the sta from the local station3888* table.3889*/3890if (!sta) {3891sta = rcu_dereference_protected(3892mvm->fw_id_to_mac_id[sta_id],3893lockdep_is_held(&mvm->mutex));3894if (IS_ERR_OR_NULL(sta)) {3895IWL_ERR(mvm, "Invalid station id\n");3896return -EINVAL;3897}3898}38993900if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))3901return -EINVAL;3902} else {3903struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);39043905sta_id = mvmvif->deflink.mcast_sta.sta_id;3906}39073908if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||3909keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||3910keyconf->cipher == 
WLAN_CIPHER_SUITE_BIP_GMAC_256) {3911ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);3912goto end;3913}39143915/* If the key_offset is not pre-assigned, we need to find a3916* new offset to use. In normal cases, the offset is not3917* pre-assigned, but during HW_RESTART we want to reuse the3918* same indices, so we pass them when this function is called.3919*3920* In D3 entry, we need to hardcoded the indices (because the3921* firmware hardcodes the PTK offset to 0). In this case, we3922* need to make sure we don't overwrite the hw_key_idx in the3923* keyconf structure, because otherwise we cannot configure3924* the original ones back when resuming.3925*/3926if (key_offset == STA_KEY_IDX_INVALID) {3927key_offset = iwl_mvm_set_fw_key_idx(mvm);3928if (key_offset == STA_KEY_IDX_INVALID)3929return -ENOSPC;3930keyconf->hw_key_idx = key_offset;3931}39323933ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);3934if (ret)3935goto end;39363937/*3938* For WEP, the same key is used for multicast and unicast. Upload it3939* again, using the same key offset, and now pointing the other one3940* to the same key slot (offset).3941* If this fails, remove the original as well.3942*/3943if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||3944keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&3945sta) {3946ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,3947key_offset, !mcast);3948if (ret) {3949__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);3950goto end;3951}3952}39533954__set_bit(key_offset, mvm->fw_key_table);39553956end:3957IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",3958keyconf->cipher, keyconf->keylen, keyconf->keyidx,3959sta ? 
sta->addr : zero_addr, ret);3960return ret;3961}39623963int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,3964struct ieee80211_vif *vif,3965struct ieee80211_sta *sta,3966struct ieee80211_key_conf *keyconf)3967{3968bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);3969struct iwl_mvm_sta *mvm_sta;3970u8 sta_id = IWL_INVALID_STA;3971int ret, i;39723973lockdep_assert_held(&mvm->mutex);39743975/* Get the station from the mvm local station table */3976mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);3977if (mvm_sta)3978sta_id = mvm_sta->deflink.sta_id;3979else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)3980sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;398139823983IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",3984keyconf->keyidx, sta_id);39853986if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||3987keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||3988keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)3989return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);39903991if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {3992IWL_ERR(mvm, "offset %d not used in fw key table.\n",3993keyconf->hw_key_idx);3994return -ENOENT;3995}39963997/* track which key was deleted last */3998for (i = 0; i < STA_KEY_MAX_NUM; i++) {3999if (mvm->fw_key_deleted[i] < U8_MAX)4000mvm->fw_key_deleted[i]++;4001}4002mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;40034004if (sta && !mvm_sta) {4005IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");4006return 0;4007}40084009ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);4010if (ret)4011return ret;40124013/* delete WEP key twice to get rid of (now useless) offset */4014if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||4015keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)4016ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);40174018return ret;4019}40204021void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,4022struct ieee80211_vif *vif,4023struct ieee80211_key_conf 
*keyconf,4024struct ieee80211_sta *sta, u32 iv32,4025u16 *phase1key)4026{4027struct iwl_mvm_sta *mvm_sta;4028bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);4029bool mfp = sta ? sta->mfp : false;40304031rcu_read_lock();40324033mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);4034if (WARN_ON_ONCE(!mvm_sta))4035goto unlock;4036iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,4037iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,4038mfp);40394040unlock:4041rcu_read_unlock();4042}40434044void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,4045struct ieee80211_sta *sta)4046{4047struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);4048struct iwl_mvm_add_sta_cmd cmd = {4049.add_modify = STA_MODE_MODIFY,4050.sta_id = mvmsta->deflink.sta_id,4051.station_flags_msk = cpu_to_le32(STA_FLG_PS),4052.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),4053};4054int ret;40554056ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,4057iwl_mvm_add_sta_cmd_size(mvm), &cmd);4058if (ret)4059IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);4060}40614062void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,4063struct ieee80211_sta *sta,4064enum ieee80211_frame_release_type reason,4065u16 cnt, u16 tids, bool more_data,4066bool single_sta_queue)4067{4068struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);4069struct iwl_mvm_add_sta_cmd cmd = {4070.add_modify = STA_MODE_MODIFY,4071.sta_id = mvmsta->deflink.sta_id,4072.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,4073.sleep_tx_count = cpu_to_le16(cnt),4074.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),4075};4076int tid, ret;4077unsigned long _tids = tids;40784079/* convert TIDs to ACs - we don't support TSPEC so that's OK4080* Note that this field is reserved and unused by firmware not4081* supporting GO uAPSD, so it's safe to always do this.4082*/4083for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)4084cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);40854086/* If we're releasing frames 
from aggregation or dqa queues then check4087* if all the queues that we're releasing frames from, combined, have:4088* - more frames than the service period, in which case more_data4089* needs to be set4090* - fewer than 'cnt' frames, in which case we need to adjust the4091* firmware command (but do that unconditionally)4092*/4093if (single_sta_queue) {4094int remaining = cnt;4095int sleep_tx_count;40964097spin_lock_bh(&mvmsta->lock);4098for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {4099struct iwl_mvm_tid_data *tid_data;4100u16 n_queued;41014102tid_data = &mvmsta->tid_data[tid];41034104n_queued = iwl_mvm_tid_queued(mvm, tid_data);4105if (n_queued > remaining) {4106more_data = true;4107remaining = 0;4108break;4109}4110remaining -= n_queued;4111}4112sleep_tx_count = cnt - remaining;4113if (reason == IEEE80211_FRAME_RELEASE_UAPSD)4114mvmsta->sleep_tx_count = sleep_tx_count;4115spin_unlock_bh(&mvmsta->lock);41164117cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);4118if (WARN_ON(cnt - remaining == 0)) {4119ieee80211_sta_eosp(sta);4120return;4121}4122}41234124/* Note: this is ignored by firmware not supporting GO uAPSD */4125if (more_data)4126cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;41274128if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {4129mvmsta->next_status_eosp = true;4130cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;4131} else {4132cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;4133}41344135/* block the Tx queues until the FW updated the sleep Tx count */4136ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,4137CMD_ASYNC | CMD_BLOCK_TXQS,4138iwl_mvm_add_sta_cmd_size(mvm), &cmd);4139if (ret)4140IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);4141}41424143void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,4144struct iwl_rx_cmd_buffer *rxb)4145{4146struct iwl_rx_packet *pkt = rxb_addr(rxb);4147struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;4148struct ieee80211_sta *sta;4149u32 sta_id = le32_to_cpu(notif->sta_id);41504151if 
(WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))4152return;41534154rcu_read_lock();4155sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);4156if (!IS_ERR_OR_NULL(sta))4157ieee80211_sta_eosp(sta);4158rcu_read_unlock();4159}41604161void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,4162struct iwl_mvm_sta *mvmsta,4163bool disable)4164{4165struct iwl_mvm_add_sta_cmd cmd = {4166.add_modify = STA_MODE_MODIFY,4167.sta_id = mvmsta->deflink.sta_id,4168.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,4169.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),4170.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),4171};4172int ret;41734174if (mvm->mld_api_is_used) {4175if (!iwl_mvm_has_no_host_disable_tx(mvm))4176iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);4177return;4178}41794180ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,4181iwl_mvm_add_sta_cmd_size(mvm), &cmd);4182if (ret)4183IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);4184}41854186void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,4187struct ieee80211_sta *sta,4188bool disable)4189{4190struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);41914192if (mvm->mld_api_is_used) {4193if (!iwl_mvm_has_no_host_disable_tx(mvm))4194iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);4195return;4196}41974198spin_lock_bh(&mvm_sta->lock);41994200if (mvm_sta->disable_tx == disable) {4201spin_unlock_bh(&mvm_sta->lock);4202return;4203}42044205mvm_sta->disable_tx = disable;42064207/*4208* If sta PS state is handled by mac80211, tell it to start/stop4209* queuing tx for this station.4210*/4211if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))4212ieee80211_sta_block_awake(mvm->hw, sta, disable);42134214iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);42154216spin_unlock_bh(&mvm_sta->lock);4217}42184219static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,4220struct iwl_mvm_vif *mvmvif,4221struct iwl_mvm_int_sta *sta,4222bool disable)4223{4224u32 id = 
FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);4225struct iwl_mvm_add_sta_cmd cmd = {4226.add_modify = STA_MODE_MODIFY,4227.sta_id = sta->sta_id,4228.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,4229.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),4230.mac_id_n_color = cpu_to_le32(id),4231};4232int ret;42334234ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,4235iwl_mvm_add_sta_cmd_size(mvm), &cmd);4236if (ret)4237IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);4238}42394240void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,4241struct iwl_mvm_vif *mvmvif,4242bool disable)4243{4244struct ieee80211_sta *sta;4245struct iwl_mvm_sta *mvm_sta;4246int i;42474248if (mvm->mld_api_is_used) {4249if (!iwl_mvm_has_no_host_disable_tx(mvm))4250iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,4251disable);4252return;4253}42544255rcu_read_lock();42564257/* Block/unblock all the stations of the given mvmvif */4258for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {4259sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);4260if (IS_ERR_OR_NULL(sta))4261continue;42624263mvm_sta = iwl_mvm_sta_from_mac80211(sta);4264if (mvm_sta->mac_id_n_color !=4265FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))4266continue;42674268iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);4269}42704271rcu_read_unlock();42724273if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))4274return;42754276/* Need to block/unblock also multicast station */4277if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA)4278iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,4279&mvmvif->deflink.mcast_sta,4280disable);42814282/*4283* Only unblock the broadcast station (FW blocks it for immediate4284* quiet, not the driver)4285*/4286if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA)4287iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,4288&mvmvif->deflink.bcast_sta,4289disable);4290}42914292void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif 
*vif)4293{4294struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);4295struct iwl_mvm_sta *mvmsta;42964297rcu_read_lock();42984299mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);43004301if (mvmsta)4302iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);43034304rcu_read_unlock();4305}43064307u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)4308{4309u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);43104311/*4312* In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need4313* to align the wrap around of ssn so we compare relevant values.4314*/4315if (mvm->trans->mac_cfg->gen2)4316sn &= 0xff;43174318return ieee80211_sn_sub(sn, tid_data->next_reclaimed);4319}43204321void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,4322struct ieee80211_vif *vif,4323u32 id)4324{4325struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {4326.id = cpu_to_le32(id),4327};4328int ret;43294330ret = iwl_mvm_send_cmd_pdu(mvm,4331WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),4332CMD_ASYNC,4333sizeof(cancel_channel_switch_cmd),4334&cancel_channel_switch_cmd);4335if (ret)4336IWL_ERR(mvm, "Failed to cancel the channel switch\n");4337}43384339static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,4340u8 fw_sta_id)4341{4342struct ieee80211_link_sta *link_sta =4343rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);4344struct iwl_mvm_vif_link_info *link;43454346if (WARN_ON_ONCE(!link_sta))4347return -EINVAL;43484349link = mvmvif->link[link_sta->link_id];43504351if (WARN_ON_ONCE(!link))4352return -EINVAL;43534354return link->fw_link_id;4355}43564357#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)43584359void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,4360bool tx, int queue)4361{4362struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);4363struct iwl_mvm *mvm = mvmvif->mvm;4364struct iwl_mvm_tpt_counter *queue_counter;4365struct 
iwl_mvm_mpdu_counter *link_counter;4366u32 total_mpdus = 0;4367int fw_link_id;43684369/* Count only for a BSS sta, and only when EMLSR is possible */4370if (!mvm_sta->mpdu_counters)4371return;43724373/* Map sta id to link id */4374fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);4375if (fw_link_id < 0)4376return;43774378queue_counter = &mvm_sta->mpdu_counters[queue];4379link_counter = &queue_counter->per_link[fw_link_id];43804381spin_lock_bh(&queue_counter->lock);43824383if (tx)4384link_counter->tx += count;4385else4386link_counter->rx += count;43874388/*4389* When not in EMLSR, the window and the decision to enter EMLSR are4390* handled during counting, when in EMLSR - in the statistics flow4391*/4392if (mvmvif->esr_active)4393goto out;43944395if (time_is_before_jiffies(queue_counter->window_start +4396IWL_MVM_TPT_COUNT_WINDOW)) {4397memset(queue_counter->per_link, 0,4398sizeof(queue_counter->per_link));4399queue_counter->window_start = jiffies;44004401IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");4402}44034404for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)4405total_mpdus += tx ? queue_counter->per_link[i].tx :4406queue_counter->per_link[i].rx;44074408if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)4409wiphy_work_queue(mvmvif->mvm->hw->wiphy,4410&mvmvif->unblock_esr_tpt_wk);44114412out:4413spin_unlock_bh(&queue_counter->lock);4414}441544164417