Path: blob/main/sys/contrib/dev/iwlwifi/pcie/ctxt-info-v2.c
48372 views
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2025 Intel Corporation
 */
#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-v2.h"
#include "gen1_2/internal.h"
#include "iwl-prph.h"

/*
 * DMI allow-list (matched on system vendor) of platforms for which
 * forcing the SCU to stay active has been approved; consulted by
 * iwl_is_force_scu_active_approved() below.
 */
static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
		},
	},
	/* keep last */
	{}
};

/* true when the running platform matches the DMI allow-list above */
static bool iwl_is_force_scu_active_approved(void)
{
	return !!dmi_check_system(dmi_force_scu_active_approved_list);
}

/*
 * Configure the firmware debug-monitor (HWM) destination in the PRPH
 * scratch area.
 *
 * @trans: the transport
 * @dbg_cfg: HWM configuration inside the PRPH scratch to fill in
 * @control_flags: PRPH scratch control flags; when a destination was
 *	selected, the destination bits together with
 *	IWL_PRPH_SCRATCH_EARLY_DEBUG_EN are OR'ed into it
 */
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
			      struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
			      u32 *control_flags)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 dbg_flags = 0;

	if (!iwl_trans_dbg_ini_valid(trans)) {
		/* no ini debug TLVs: fall back to the legacy fw monitor */
		struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (fw_mon->size) {
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM buffer destination\n");

			dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
			dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
		}

		goto out;
	}

	fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];

	switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
	case IWL_FW_INI_LOCATION_SRAM_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying SMEM buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_NPK_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying NPK buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_DRAM_PATH:
		if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
			/* only the first fragment is handed to the device */
			struct iwl_dram_data *frag =
				&trans->dbg.fw_mon_ini[alloc_id].frags[0];
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
			dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
			dbg_cfg->hwm_size = cpu_to_le32(frag->size);
			dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (debug_token_config=%u)\n",
				     dbg_cfg->debug_token_config);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
				     alloc_id,
				     trans->dbg.fw_mon_ini[alloc_id].num_frags);
		}
		break;
	default:
		IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
			     le32_to_cpu(fw_mon_cfg->buf_location));
	}
out:
	if (dbg_flags)
		*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}

/*
 * Allocate and fill the v2 context-info structures used for firmware
 * self-load:
 *  - the PRPH scratch (control flags, RX/debug config, ucode sections)
 *  - a PRPH info page (its upper half doubles as dummy TR/CR tail
 *    pointers, see the comment below)
 *  - the context info descriptor itself
 *  - a coherent DMA copy of the IML (image loader)
 *
 * Returns 0 on success or a negative errno; on failure, everything
 * already allocated is unwound again (goto-cleanup chain at the end).
 */
int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
				const struct iwl_fw *fw,
				const struct fw_img *img)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info_v2 *ctxt_info_v2;
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	u32 control_flags = 0;
	u32 control_flags_ext = 0;
	int ret;
	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			      trans->mac_cfg->base->min_txq_size);

	/* translate the configured RX buffer size into control flags */
	switch (trans->conf.rx_buf_size) {
	case IWL_AMSDU_DEF:
		return -EINVAL;
	case IWL_AMSDU_2K:
		break;
	case IWL_AMSDU_4K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		break;
	case IWL_AMSDU_12K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
		break;
	}

	if (trans->conf.dsbr_urm_fw_dependent)
		control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;

	if (trans->conf.dsbr_urm_permanent)
		control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;

	if (trans->conf.ext_32khz_clock_valid)
		control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;

	/* Allocate prph scratch */
	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
					  &trans_pcie->prph_scratch_dma_addr,
					  GFP_KERNEL);
	if (!prph_scratch)
		return -ENOMEM;

	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id =
		cpu_to_le16((u16)trans->info.hw_rev);
	/* size is expressed in DWs */
	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;

	if (trans->mac_cfg->imr_enabled)
		control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;

	/* SCU force-active is restricted to GL MACs on approved platforms */
	if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
	    iwl_is_force_scu_active_approved()) {
		control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
		IWL_DEBUG_FW(trans,
			     "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
			     IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
	}

	if (trans->do_top_reset) {
		WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC);
		control_flags |= IWL_PRPH_SCRATCH_TOP_RESET;
	}

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
		cpu_to_le64(trans_pcie->rxq->bd_dma);

	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
				      &control_flags);
	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
	prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);

	/* initialize the Step equalizer data */
	prph_sc_ctrl->step_cfg.mbx_addr_0 =
		cpu_to_le32(trans->conf.mbx_addr_0_step);
	prph_sc_ctrl->step_cfg.mbx_addr_1 =
		cpu_to_le32(trans->conf.mbx_addr_1_step);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);
	if (ret)
		goto err_free_prph_scratch;

	/* Allocate prph information
	 * currently we don't assign to the prph info anything, but it would get
	 * assigned later
	 *
	 * We also use the second half of this page to give the device some
	 * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
	 * use this, but the hardware still reads/writes there and we can't let
	 * it go do that with a NULL pointer.
	 */
	BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
	prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info) {
		ret = -ENOMEM;
		goto err_free_prph_scratch;
	}

	/* Allocate context info */
	ctxt_info_v2 = dma_alloc_coherent(trans->dev,
					  sizeof(*ctxt_info_v2),
					  &trans_pcie->ctxt_info_dma_addr,
					  GFP_KERNEL);
	if (!ctxt_info_v2) {
		ret = -ENOMEM;
		goto err_free_prph_info;
	}

	ctxt_info_v2->prph_info_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr);
	ctxt_info_v2->prph_scratch_base_addr =
		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);

	/*
	 * This code assumes the FSEQ is last and we can make that
	 * optional; old devices _should_ be fine with a bigger size,
	 * but in simulation we check the size more precisely.
	 */
	BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) +
		     sizeof(prph_scratch->dram.fseq_img) !=
		     sizeof(*prph_scratch));
	if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
		ctxt_info_v2->prph_scratch_size =
			cpu_to_le32(sizeof(*prph_scratch));
	else
		ctxt_info_v2->prph_scratch_size =
			cpu_to_le32(offsetofend(typeof(*prph_scratch),
						dram.common));

	ctxt_info_v2->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	/* dummy TR/CR tail pointers live in the upper half of prph_info */
	ctxt_info_v2->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
	ctxt_info_v2->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
	ctxt_info_v2->mtr_base_addr =
		cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
	ctxt_info_v2->mcr_base_addr =
		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	ctxt_info_v2->mtr_size =
		cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
	ctxt_info_v2->mcr_size =
		cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)));

	trans_pcie->ctxt_info_v2 = ctxt_info_v2;
	trans_pcie->prph_info = prph_info;
	trans_pcie->prph_scratch = prph_scratch;

	/* Allocate IML */
	trans_pcie->iml_len = fw->iml_len;
	trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,
					     &trans_pcie->iml_dma_addr,
					     GFP_KERNEL);
	if (!trans_pcie->iml) {
		ret = -ENOMEM;
		goto err_free_ctxt_info;
	}

	memcpy(trans_pcie->iml, fw->iml, fw->iml_len);

	return 0;

err_free_ctxt_info:
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
			  trans_pcie->ctxt_info_v2,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_v2 = NULL;
err_free_prph_info:
	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
			  trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
	dma_free_coherent(trans->dev,
			  sizeof(*prph_scratch),
			  prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	return ret;

}

/*
 * Hand the context-info and IML addresses to the device and trigger
 * the firmware self-load sequence.
 */
void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
	iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len);

	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
		    CSR_AUTO_FUNC_BOOT_ENA);
}

/*
 * Free the context-info resources. The IML and fw image sections are
 * always released; when @alive is true everything else is kept, since
 * (per the comment below) ctxt_info_v2/prph_scratch are still needed
 * for PNVM load while the firmware is running.
 */
void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->iml) {
		dma_free_coherent(trans->dev, trans_pcie->iml_len,
				  trans_pcie->iml,
				  trans_pcie->iml_dma_addr);
		trans_pcie->iml_dma_addr = 0;
		trans_pcie->iml_len = 0;
		trans_pcie->iml = NULL;
	}

	iwl_pcie_ctxt_info_free_fw_img(trans);

	if (alive)
		return;

	if (!trans_pcie->ctxt_info_v2)
		return;

	/* ctxt_info_v2 and prph_scratch are still needed for PNVM load */
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
			  trans_pcie->ctxt_info_v2,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_v2 = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	/* this is needed for the entire lifetime */
	dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;
}

/*
 * Load an unfragmented PNVM image (exactly two chunks) into one
 * contiguous DMA region: chunk 0 followed directly by chunk 1.
 * Returns 0, or -EINVAL on bad chunk count / u32 length overflow,
 * or -ENOMEM.
 */
static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans,
					 const struct iwl_pnvm_image *pnvm_data,
					 struct iwl_dram_data *dram)
{
	u32 len, len0, len1;

	if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
		IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
			     pnvm_data->n_chunks);
		return -EINVAL;
	}

	len0 = pnvm_data->chunks[0].len;
	len1 = pnvm_data->chunks[1].len;
	/* reject len0 + len1 overflowing u32 before computing it */
	if (len1 > 0xFFFFFFFF - len0) {
		IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
		return -EINVAL;
	}
	len = len0 + len1;

	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}

	dram->size = len;
	memcpy(dram->block, pnvm_data->chunks[0].data, len0);
	memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);

	return 0;
}

/*
 * Load a fragmented PNVM image: one DMA region per chunk, plus a
 * descriptor array (in its own DMA region) holding the physical
 * address of each payload. On any payload allocation failure, all
 * regions allocated so far (including the descriptor) are freed via
 * iwl_trans_pcie_free_pnvm_dram_regions().
 */
static int iwl_pcie_load_payloads_segments
				(struct iwl_trans *trans,
				 struct iwl_dram_regions *dram_regions,
				 const struct iwl_pnvm_image *pnvm_data)
{
	struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
	struct iwl_prph_scratch_mem_desc_addr_array *addresses;
	const void *data;
	u32 len;
	int i;

	/* allocate and init DRAM descriptors array */
	len = sizeof(struct iwl_prph_scratch_mem_desc_addr_array);
	desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
						(trans,
						 len,
						 &desc_dram->physical);
	if (!desc_dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}
	desc_dram->size = len;
	memset(desc_dram->block, 0, len);

	/* allocate DRAM region for each payload */
	dram_regions->n_regions = 0;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		len = pnvm_data->chunks[i].len;
		data = pnvm_data->chunks[i].data;

		if (iwl_pcie_ctxt_info_alloc_dma(trans,
						 data,
						 len,
						 cur_payload_dram)) {
			iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
							      trans->dev);
			return -ENOMEM;
		}

		dram_regions->n_regions++;
		cur_payload_dram++;
	}

	/* fill desc with the DRAM payloads addresses */
	addresses = desc_dram->block;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		addresses->mem_descs[i] =
			cpu_to_le64(dram_regions->drams[i].physical);
	}

	return 0;

}

/*
 * Load the PNVM payloads into DRAM (fragmented or contiguous depending
 * on firmware capability). Idempotent: returns 0 immediately if already
 * loaded; also a no-op (0) for pre-AX210 families.
 */
int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
					 const struct iwl_pnvm_image *pnvm_payloads,
					 const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->pnvm_loaded)
		return 0;

	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
		return -EBUSY;

	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (!pnvm_payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      pnvm_payloads);
		if (!ret)
			trans->pnvm_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads,
						    &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->pnvm_loaded = true;
		}
	}

	return ret;
}

/* sum of the sizes of all payload regions (descriptor region excluded) */
static inline size_t
iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
{
	size_t total_size = 0;
	int i;

	for (i = 0; i < dram_regions->n_regions; i++)
		total_size += dram_regions->drams[i].size;

	return total_size;
}

/*
 * Point the PRPH scratch PNVM config at the fragmented layout: base is
 * the descriptor array, size is the total of all payload regions.
 */
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

/* point the PRPH scratch PNVM config at the single contiguous region */
static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}

/*
 * Publish the previously loaded PNVM to the firmware via the PRPH
 * scratch, choosing the layout matching the firmware capability.
 * No-op for pre-AX210 families.
 */
void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
					 const struct iwl_ucode_capabilities *capa)
{
	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_pnvm_segments(trans);
	else
		iwl_pcie_set_contig_pnvm(trans);
}

/*
 * Load the reduce-power tables into DRAM. Mirrors
 * iwl_trans_pcie_ctx_info_v2_load_pnvm() but targets
 * reduced_tables_data / reduce_power_cfg and the reduce_power_loaded
 * flag.
 */
int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
						 const struct iwl_pnvm_image *payloads,
						 const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->reduce_power_loaded)
		return 0;

	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
		return -EBUSY;

	if (!payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      payloads);
		if (!ret)
			trans->reduce_power_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_contig(trans, payloads,
						    &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->reduce_power_loaded = true;
		}
	}

	return ret;
}

/*
 * Point the PRPH scratch reduce-power config at the fragmented layout
 * (descriptor array base + total payload size).
 */
static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

/* point the reduce-power config at the single contiguous region */
static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
}

/*
 * Publish the previously loaded reduce-power tables to the firmware,
 * choosing the layout matching the firmware capability. No-op for
 * pre-AX210 families.
 */
void
iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
					    const struct iwl_ucode_capabilities *capa)
{
	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_reduce_power_segments(trans);
	else
		iwl_pcie_set_contig_reduce_power(trans);
}