/*
 * Path: blob/main/sys/contrib/dev/iwlwifi/iwl-trans.h
 * 48253 views
 */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */1/*2* Copyright (C) 2005-2014, 2018-2025 Intel Corporation3* Copyright (C) 2013-2015 Intel Mobile Communications GmbH4* Copyright (C) 2016-2017 Intel Deutschland GmbH5*/6#ifndef __iwl_trans_h__7#define __iwl_trans_h__89#include <linux/ieee80211.h>10#include <linux/mm.h> /* for page_address */11#include <linux/lockdep.h>12#include <linux/kernel.h>1314#include "iwl-debug.h"15#include "iwl-config.h"16#include "fw/img.h"17#include "iwl-op-mode.h"18#include <linux/firmware.h>19#include "fw/api/cmdhdr.h"20#include "fw/api/txq.h"21#include "fw/api/dbg-tlv.h"22#include "iwl-dbg-tlv.h"23#if defined(__FreeBSD__)24#include <linux/skbuff.h>25#include "iwl-modparams.h"26#endif2728/**29* DOC: Transport layer - what is it ?30*31* The transport layer is the layer that deals with the HW directly. It provides32* the PCIe access to the underlying hardwarwe. The transport layer doesn't33* provide any policy, algorithm or anything of this kind, but only mechanisms34* to make the HW do something. It is not completely stateless but close to it.35*/3637/**38* DOC: Life cycle of the transport layer39*40* The transport layer has a very precise life cycle.41*42* 1) A helper function is called during the module initialization and43* registers the bus driver's ops with the transport's alloc function.44* 2) Bus's probe calls to the transport layer's allocation functions.45* Of course this function is bus specific.46* 3) This allocation functions will spawn the upper layer which will47* register mac80211.48*49* 4) At some point (i.e. 
mac80211's start call), the op_mode will call50* the following sequence:51* start_hw52* start_fw53*54* 5) Then when finished (or reset):55* stop_device56*57* 6) Eventually, the free function will be called.58*/5960/* default preset 0 (start from bit 16)*/61#define IWL_FW_DBG_DOMAIN_POS 1662#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS)6364#define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON6566#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */67#define FH_RSCSR_FRAME_INVALID 0x5555000068#define FH_RSCSR_FRAME_ALIGN 0x4069#define FH_RSCSR_RPA_EN BIT(25)70#define FH_RSCSR_RADA_EN BIT(26)71#define FH_RSCSR_RXQ_POS 1672#define FH_RSCSR_RXQ_MASK 0x3F00007374struct iwl_rx_packet {75/*76* The first 4 bytes of the RX frame header contain both the RX frame77* size and some flags.78* Bit fields:79* 31: flag flush RB request80* 30: flag ignore TC (terminal counter) request81* 29: flag fast IRQ request82* 28-27: Reserved83* 26: RADA enabled84* 25: Offload enabled85* 24: RPF enabled86* 23: RSS enabled87* 22: Checksum enabled88* 21-16: RX queue89* 15-14: Reserved90* 13-00: RX frame size91*/92__le32 len_n_flags;93struct iwl_cmd_header hdr;94u8 data[];95} __packed;9697static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)98{99return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;100}101102static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)103{104return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);105}106107/**108* enum CMD_MODE - how to send the host commands ?109*110* @CMD_ASYNC: Return right away and don't wait for the response111* @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of112* the response. 
The caller needs to call iwl_free_resp when done.113* @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.114* @CMD_BLOCK_TXQS: Block TXQs while the comment is executing.115*/116enum CMD_MODE {117CMD_ASYNC = BIT(0),118CMD_WANT_SKB = BIT(1),119CMD_SEND_IN_RFKILL = BIT(2),120CMD_BLOCK_TXQS = BIT(3),121};122#define CMD_MODE_BITS 5123124#define DEF_CMD_PAYLOAD_SIZE 320125126/**127* struct iwl_device_cmd128*129* For allocation of the command and tx queues, this establishes the overall130* size of the largest command we send to uCode, except for commands that131* aren't fully copied and use other TFD space.132*133* @hdr: command header134* @payload: payload for the command135* @hdr_wide: wide command header136* @payload_wide: payload for the wide command137*/138struct iwl_device_cmd {139union {140struct {141struct iwl_cmd_header hdr; /* uCode API */142u8 payload[DEF_CMD_PAYLOAD_SIZE];143};144struct {145struct iwl_cmd_header_wide hdr_wide;146u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -147sizeof(struct iwl_cmd_header_wide) +148sizeof(struct iwl_cmd_header)];149};150};151} __packed;152153/**154* struct iwl_device_tx_cmd - buffer for TX command155* @hdr: the header156* @payload: the payload placeholder157*158* The actual structure is sized dynamically according to need.159*/160struct iwl_device_tx_cmd {161struct iwl_cmd_header hdr;162u8 payload[];163} __packed;164165#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))166167/*168* number of transfer buffers (fragments) per transmit frame descriptor;169* this is just the driver's idea, the hardware supports 20170*/171#define IWL_MAX_CMD_TBS_PER_TFD 2172173/**174* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command175*176* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's177* ring. The transport layer doesn't map the command's buffer to DMA, but178* rather copies it to a previously allocated DMA buffer. 
This flag tells179* the transport layer not to copy the command, but to map the existing180* buffer (that is passed in) instead. This saves the memcpy and allows181* commands that are bigger than the fixed buffer to be submitted.182* Note that a TFD entry after a NOCOPY one cannot be a normal copied one.183* @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this184* chunk internally and free it again after the command completes. This185* can (currently) be used only once per command.186* Note that a TFD entry after a DUP one cannot be a normal copied one.187*/188enum iwl_hcmd_dataflag {189IWL_HCMD_DFL_NOCOPY = BIT(0),190IWL_HCMD_DFL_DUP = BIT(1),191};192193enum iwl_error_event_table_status {194IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),195IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),196IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),197IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),198IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),199IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),200IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),201};202203/**204* struct iwl_host_cmd - Host command to the uCode205*206* @data: array of chunks that composes the data of the host command207* @resp_pkt: response packet, if %CMD_WANT_SKB was set208* @_rx_page_order: (internally used to free response packet)209* [ FreeBSD uses _page instead ]210* @_rx_page_addr: (internally used to free response packet)211* @flags: can be CMD_*212* @len: array of the lengths of the chunks in data213* @dataflags: IWL_HCMD_DFL_*214* @id: command id of the host command, for wide commands encoding the215* version and group as well216*/217struct iwl_host_cmd {218const void *data[IWL_MAX_CMD_TBS_PER_TFD];219struct iwl_rx_packet *resp_pkt;220#if defined(__linux__)221unsigned long _rx_page_addr;222#elif defined(__FreeBSD__)223struct page *_page;224#endif225u32 _rx_page_order;226227u32 flags;228u32 id;229u16 len[IWL_MAX_CMD_TBS_PER_TFD];230u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];231};232233static inline void iwl_free_resp(struct iwl_host_cmd *cmd)234{235#if 
defined(__linux__)236free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);237#elif defined(__FreeBSD__)238__free_pages(cmd->_page, cmd->_rx_page_order);239#endif240}241242struct iwl_rx_cmd_buffer {243struct page *_page;244int _offset;245bool _page_stolen;246u32 _rx_page_order;247unsigned int truesize;248};249250static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)251{252return (void *)((unsigned long)page_address(r->_page) + r->_offset);253}254255static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)256{257return r->_offset;258}259260static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)261{262r->_page_stolen = true;263get_page(r->_page);264return r->_page;265}266267static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)268{269__free_pages(r->_page, r->_rx_page_order);270}271272#define MAX_NO_RECLAIM_CMDS 6273274#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))275276/*277* Maximum number of HW queues the transport layer278* currently supports279*/280#define IWL_MAX_HW_QUEUES 32281#define IWL_MAX_TVQM_QUEUES 512282283#define IWL_MAX_TID_COUNT 8284#define IWL_MGMT_TID 15285#define IWL_FRAME_LIMIT 64286#define IWL_MAX_RX_HW_QUEUES 16287#define IWL_9000_MAX_RX_HW_QUEUES 1288289/**290* enum iwl_d3_status - WoWLAN image/device status291* @IWL_D3_STATUS_ALIVE: firmware is still running after resume292* @IWL_D3_STATUS_RESET: device was reset while suspended293*/294enum iwl_d3_status {295IWL_D3_STATUS_ALIVE,296IWL_D3_STATUS_RESET,297};298299/**300* enum iwl_trans_status: transport status flags301* @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed302* @STATUS_DEVICE_ENABLED: APM is enabled303* @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)304* @STATUS_INT_ENABLED: interrupts are enabled305* @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch306* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode307* @STATUS_FW_ERROR: the fw is in error state308* @STATUS_TRANS_DEAD: trans is dead - 
avoid any read/write operation309* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,310* e.g. for testing311* @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode312* via iwl_trans_finish_sw_reset()313* @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump314* the firmware state yet315* @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't316* attempt another reset yet317* @STATUS_SUSPENDED: device is suspended, don't send commands that318* aren't marked accordingly319*/320enum iwl_trans_status {321STATUS_SYNC_HCMD_ACTIVE,322STATUS_DEVICE_ENABLED,323STATUS_TPOWER_PMI,324STATUS_INT_ENABLED,325STATUS_RFKILL_HW,326STATUS_RFKILL_OPMODE,327STATUS_FW_ERROR,328STATUS_TRANS_DEAD,329STATUS_SUPPRESS_CMD_ERROR_ONCE,330STATUS_IN_SW_RESET,331STATUS_RESET_PENDING,332STATUS_TRANS_RESET_IN_PROGRESS,333STATUS_SUSPENDED,334};335336static inline int337iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)338{339switch (rb_size) {340case IWL_AMSDU_2K:341return get_order(2 * 1024);342case IWL_AMSDU_4K:343return get_order(4 * 1024);344case IWL_AMSDU_8K:345return get_order(8 * 1024);346case IWL_AMSDU_12K:347return get_order(16 * 1024);348default:349WARN_ON(1);350return -1;351}352}353354static inline int355iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)356{357switch (rb_size) {358case IWL_AMSDU_2K:359return 2 * 1024;360case IWL_AMSDU_4K:361return 4 * 1024;362case IWL_AMSDU_8K:363return 8 * 1024;364case IWL_AMSDU_12K:365return 16 * 1024;366default:367WARN_ON(1);368return 0;369}370}371372struct iwl_hcmd_names {373u8 cmd_id;374const char *const cmd_name;375};376377#define HCMD_NAME(x) \378{ .cmd_id = x, .cmd_name = #x }379380struct iwl_hcmd_arr {381const struct iwl_hcmd_names *arr;382int size;383};384385#define HCMD_ARR(x) \386{ .arr = x, .size = ARRAY_SIZE(x) }387388/**389* struct iwl_dump_sanitize_ops - dump sanitization operations390* @frob_txf: Scrub the TX FIFO data391* @frob_hcmd: Scrub a host command, the %hcmd pointer 
is to the header392* but that might be short or long (&struct iwl_cmd_header or393* &struct iwl_cmd_header_wide)394* @frob_mem: Scrub memory data395*/396struct iwl_dump_sanitize_ops {397void (*frob_txf)(void *ctx, void *buf, size_t buflen);398void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);399void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);400};401402/**403* struct iwl_trans_config - transport configuration404*405* These values should be set before iwl_trans_op_mode_enter().406*407* @cmd_queue: the index of the command queue.408* Must be set before start_fw.409* @cmd_fifo: the fifo for host commands410* @no_reclaim_cmds: Some devices erroneously don't set the411* SEQ_RX_FRAME bit on some notifications, this is the412* list of such notifications to filter. Max length is413* %MAX_NO_RECLAIM_CMDS.414* @n_no_reclaim_cmds: # of commands in list415* @rx_buf_size: RX buffer size needed for A-MSDUs416* if unset 4k will be the RX buffer size417* @scd_set_active: should the transport configure the SCD for HCMD queue418* @command_groups: array of command groups, each member is an array of the419* commands in the group; for debugging only420* @command_groups_size: number of command groups, to avoid illegal access421* @cb_data_offs: offset inside skb->cb to store transport data at, must have422* space for at least two pointers423* @fw_reset_handshake: firmware supports reset flow handshake424* @queue_alloc_cmd_ver: queue allocation command version, set to 0425* for using the older SCD_QUEUE_CFG, set to the version of426* SCD_QUEUE_CONFIG_CMD otherwise.427* @wide_cmd_header: true when ucode supports wide command header format428* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before429* starting the firmware, used for tracing430* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the431* start of the 802.11 header in the @rx_mpdu_cmd432* @dsbr_urm_fw_dependent: switch to URM based on fw settings433* @dsbr_urm_permanent: 
switch to URM permanently434* @mbx_addr_0_step: step address data 0435* @mbx_addr_1_step: step address data 1436* @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used437*/438struct iwl_trans_config {439u8 cmd_queue;440u8 cmd_fifo;441u8 n_no_reclaim_cmds;442u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];443444enum iwl_amsdu_size rx_buf_size;445bool scd_set_active;446const struct iwl_hcmd_arr *command_groups;447int command_groups_size;448449u8 cb_data_offs;450bool fw_reset_handshake;451u8 queue_alloc_cmd_ver;452453bool wide_cmd_header;454u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;455456u8 dsbr_urm_fw_dependent:1,457dsbr_urm_permanent:1,458ext_32khz_clock_valid:1;459460u32 mbx_addr_0_step;461u32 mbx_addr_1_step;462};463464struct iwl_trans_dump_data {465u32 len;466u8 data[];467};468469struct iwl_trans;470471struct iwl_trans_txq_scd_cfg {472u8 fifo;473u8 sta_id;474u8 tid;475bool aggregate;476int frame_limit;477};478479/**480* struct iwl_trans_rxq_dma_data - RX queue DMA data481* @fr_bd_cb: DMA address of free BD cyclic buffer482* @fr_bd_wid: Initial write index of the free BD cyclic buffer483* @urbd_stts_wrptr: DMA address of urbd_stts_wrptr484* @ur_bd_cb: DMA address of used BD cyclic buffer485*/486struct iwl_trans_rxq_dma_data {487u64 fr_bd_cb;488u32 fr_bd_wid;489u64 urbd_stts_wrptr;490u64 ur_bd_cb;491};492493/* maximal number of DRAM MAP entries supported by FW */494#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64495496/**497* struct iwl_pnvm_image - contains info about the parsed pnvm image498* @chunks: array of pointers to pnvm payloads and their sizes499* @n_chunks: the number of the pnvm payloads.500* @version: the version of the loaded PNVM image501*/502struct iwl_pnvm_image {503struct {504const void *data;505u32 len;506} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];507u32 n_chunks;508u32 version;509};510511/**512* enum iwl_trans_state - state of the transport layer513*514* @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed515* @IWL_TRANS_FW_STARTED: FW was started, but 
not alive yet516* @IWL_TRANS_FW_ALIVE: FW has sent an alive response517*/518enum iwl_trans_state {519IWL_TRANS_NO_FW,520IWL_TRANS_FW_STARTED,521IWL_TRANS_FW_ALIVE,522};523524/**525* DOC: Platform power management526*527* In system-wide power management the entire platform goes into a low528* power state (e.g. idle or suspend to RAM) at the same time and the529* device is configured as a wakeup source for the entire platform.530* This is usually triggered by userspace activity (e.g. the user531* presses the suspend button or a power management daemon decides to532* put the platform in low power mode). The device's behavior in this533* mode is dictated by the wake-on-WLAN configuration.534*535* The terms used for the device's behavior are as follows:536*537* - D0: the device is fully powered and the host is awake;538* - D3: the device is in low power mode and only reacts to539* specific events (e.g. magic-packet received or scan540* results found);541*542* These terms reflect the power modes in the firmware and are not to543* be confused with the physical device power state.544*/545546/**547* enum iwl_ini_cfg_state548* @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given549* @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded550* @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs551* are corrupted. 
The rest of the debug TLVs will still be used552*/553enum iwl_ini_cfg_state {554IWL_INI_CFG_STATE_NOT_LOADED,555IWL_INI_CFG_STATE_LOADED,556IWL_INI_CFG_STATE_CORRUPTED,557};558559/* Max time to wait for nmi interrupt */560#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)561562/**563* struct iwl_dram_data564* @physical: page phy pointer565* @block: pointer to the allocated block/page566* @size: size of the block/page567*/568struct iwl_dram_data {569dma_addr_t physical;570void *block;571int size;572};573574/**575* struct iwl_dram_regions - DRAM regions container structure576* @drams: array of several DRAM areas that contains the pnvm and power577* reduction table payloads.578* @n_regions: number of DRAM regions that were allocated579* @prph_scratch_mem_desc: points to a structure allocated in dram,580* designed to show FW where all the payloads are.581*/582struct iwl_dram_regions {583struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];584struct iwl_dram_data prph_scratch_mem_desc;585u8 n_regions;586};587588/**589* struct iwl_fw_mon - fw monitor per allocation id590* @num_frags: number of fragments591* @frags: an array of DRAM buffer fragments592*/593struct iwl_fw_mon {594u32 num_frags;595struct iwl_dram_data *frags;596};597598/**599* struct iwl_self_init_dram - dram data used by self init process600* @fw: lmac and umac dram data601* @fw_cnt: total number of items in array602* @paging: paging dram data603* @paging_cnt: total number of items in array604*/605struct iwl_self_init_dram {606struct iwl_dram_data *fw;607int fw_cnt;608struct iwl_dram_data *paging;609int paging_cnt;610};611612/**613* struct iwl_imr_data - imr dram data used during debug process614* @imr_enable: imr enable status received from fw615* @imr_size: imr dram size received from fw616* @sram_addr: sram address from debug tlv617* @sram_size: sram size from debug tlv618* @imr2sram_remainbyte: size remained after each dma transfer619* @imr_curr_addr: current dst address used during dma transfer620* 
@imr_base_addr: imr address received from fw621*/622struct iwl_imr_data {623u32 imr_enable;624u32 imr_size;625u32 sram_addr;626u32 sram_size;627u32 imr2sram_remainbyte;628u64 imr_curr_addr;629__le64 imr_base_addr;630};631632#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES 32633634/**635* struct iwl_pc_data - program counter details636* @pc_name: cpu name637* @pc_address: cpu program counter638*/639struct iwl_pc_data {640u8 pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];641u32 pc_address;642};643644/**645* struct iwl_trans_debug - transport debug related data646*647* @n_dest_reg: num of reg_ops in %dbg_dest_tlv648* @rec_on: true iff there is a fw debug recording currently active649* @dest_tlv: points to the destination TLV for debug650* @lmac_error_event_table: addrs of lmacs error tables651* @umac_error_event_table: addr of umac error table652* @tcm_error_event_table: address(es) of TCM error table(s)653* @rcm_error_event_table: address(es) of RCM error table(s)654* @error_event_table_tlv_status: bitmap that indicates what error table655* pointers was recevied via TLV. uses enum &iwl_error_event_table_status656* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state657* @external_ini_cfg: external debug cfg state. 
Uses &enum iwl_ini_cfg_state658* @fw_mon_cfg: debug buffer allocation configuration659* @fw_mon_ini: DRAM buffer fragments per allocation id660* @fw_mon: DRAM buffer for firmware monitor661* @hw_error: equals true if hw error interrupt was received from the FW662* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location663* @unsupported_region_msk: unsupported regions out of active_regions664* @active_regions: active regions665* @debug_info_tlv_list: list of debug info TLVs666* @time_point: array of debug time points667* @periodic_trig_list: periodic triggers list668* @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON669* @ucode_preset: preset based on ucode670* @restart_required: indicates debug restart is required671* @last_tp_resetfw: last handling of reset during debug timepoint672* @imr_data: IMR debug data allocation673* @dump_file_name_ext: dump file name extension674* @dump_file_name_ext_valid: dump file name extension if valid or not675* @num_pc: number of program counter for cpu676* @pc_data: details of the program counter677* @yoyo_bin_loaded: tells if a yoyo debug file has been loaded678*/679struct iwl_trans_debug {680u8 n_dest_reg;681bool rec_on;682683const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;684685u32 lmac_error_event_table[2];686u32 umac_error_event_table;687u32 tcm_error_event_table[2];688u32 rcm_error_event_table[2];689unsigned int error_event_table_tlv_status;690691enum iwl_ini_cfg_state internal_ini_cfg;692enum iwl_ini_cfg_state external_ini_cfg;693694struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];695struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];696697struct iwl_dram_data fw_mon;698699bool hw_error;700enum iwl_fw_ini_buffer_location ini_dest;701702u64 unsupported_region_msk;703struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];704struct list_head debug_info_tlv_list;705struct iwl_dbg_tlv_time_point_data 
time_point[IWL_FW_INI_TIME_POINT_NUM];706struct list_head periodic_trig_list;707708u32 domains_bitmap;709u32 ucode_preset;710bool restart_required;711u32 last_tp_resetfw;712struct iwl_imr_data imr_data;713u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];714bool dump_file_name_ext_valid;715u32 num_pc;716struct iwl_pc_data *pc_data;717bool yoyo_bin_loaded;718};719720struct iwl_dma_ptr {721dma_addr_t dma;722void *addr;723size_t size;724};725726struct iwl_cmd_meta {727/* only for SYNC commands, iff the reply skb is wanted */728struct iwl_host_cmd *source;729u32 flags: CMD_MODE_BITS;730/* sg_offset is valid if it is non-zero */731u32 sg_offset: PAGE_SHIFT;732u32 tbs;733};734735/*736* The FH will write back to the first TB only, so we need to copy some data737* into the buffer regardless of whether it should be mapped or not.738* This indicates how big the first TB must be to include the scratch buffer739* and the assigned PN.740* Since PN location is 8 bytes at offset 12, it's 20 now.741* If we make it bigger then allocations will be bigger and copy slower, so742* that's probably not useful.743*/744#define IWL_FIRST_TB_SIZE 20745#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)746747struct iwl_pcie_txq_entry {748void *cmd;749struct sk_buff *skb;750/* buffer to free after command completes */751const void *free_buf;752struct iwl_cmd_meta meta;753};754755struct iwl_pcie_first_tb_buf {756u8 buf[IWL_FIRST_TB_SIZE_ALIGN];757};758759/**760* struct iwl_txq - Tx Queue for DMA761* @tfds: transmit frame descriptors (DMA memory)762* @first_tb_bufs: start of command headers, including scratch buffers, for763* the writeback -- this is DMA memory and an array holding one buffer764* for each command on the queue765* @first_tb_dma: DMA address for the first_tb_bufs start766* @entries: transmit entries (driver state)767* @lock: queue lock768* @reclaim_lock: reclaim lock769* @stuck_timer: timer that fires if queue gets stuck770* @trans: pointer back to transport (for timer)771* 
@need_update: indicates need to update read/write index772* @ampdu: true if this queue is an ampdu queue for an specific RA/TID773* @wd_timeout: queue watchdog timeout (jiffies) - per queue774* @frozen: tx stuck queue timer is frozen775* @frozen_expiry_remainder: remember how long until the timer fires776* @block: queue is blocked777* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)778* @write_ptr: 1-st empty entry (index) host_w779* @read_ptr: last used entry (index) host_r780* @dma_addr: physical addr for BD's781* @n_window: safe queue window782* @id: queue id783* @low_mark: low watermark, resume queue if free space more than this784* @high_mark: high watermark, stop queue if free space less than this785* @overflow_q: overflow queue for handling frames that didn't fit on HW queue786* @overflow_tx: need to transmit from overflow787*788* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame789* descriptors) and required locking structures.790*791* Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware792* always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless793* there might be HW changes in the future). For the normal TX794* queues, n_window, which is the size of the software queue data795* is also 256; however, for the command queue, n_window is only796* 32 since we don't need so many commands pending. Since the HW797* still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.798* This means that we end up with the following:799* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |800* SW entries: | 0 | ... | 31 |801* where N is a number between 0 and 7. 
This means that the SW802* data is a window overlayed over the HW queue.803*/804struct iwl_txq {805void *tfds;806struct iwl_pcie_first_tb_buf *first_tb_bufs;807dma_addr_t first_tb_dma;808struct iwl_pcie_txq_entry *entries;809/* lock for syncing changes on the queue */810spinlock_t lock;811/* lock to prevent concurrent reclaim */812spinlock_t reclaim_lock;813unsigned long frozen_expiry_remainder;814struct timer_list stuck_timer;815struct iwl_trans *trans;816bool need_update;817bool frozen;818bool ampdu;819int block;820unsigned long wd_timeout;821struct sk_buff_head overflow_q;822struct iwl_dma_ptr bc_tbl;823824int write_ptr;825int read_ptr;826dma_addr_t dma_addr;827int n_window;828u32 id;829int low_mark;830int high_mark;831832bool overflow_tx;833};834835/**836* struct iwl_trans_info - transport info for outside use837* @name: the device name838* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.839* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.840* @hw_rev: the revision data of the HW841* @hw_rev_step: The mac step of the HW842* @hw_rf_id: the device RF ID843* @hw_cnv_id: the device CNV ID844* @hw_crf_id: the device CRF ID845* @hw_wfpm_id: the device wfpm ID846* @hw_id: the ID of the device / sub-device847* Bits 0:15 represent the sub-device ID848* Bits 16:31 represent the device ID.849* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),850* only valid for discrete (not integrated) NICs851* @num_rxqs: number of RX queues allocated by the transport852*/853struct iwl_trans_info {854const char *name;855u32 max_skb_frags;856u32 hw_rev;857u32 hw_rev_step;858u32 hw_rf_id;859u32 hw_crf_id;860u32 hw_cnv_id;861u32 hw_wfpm_id;862u32 hw_id;863u8 pcie_link_speed;864u8 num_rxqs;865};866867/**868* struct iwl_trans - transport common data869*870* @csme_own: true if we couldn't get ownership on the device871* @op_mode: pointer to the op_mode872* @mac_cfg: the trans-specific configuration part873* @cfg: pointer to the 
configuration874* @drv: pointer to iwl_drv875* @conf: configuration set by the opmode before enter876* @state: current device state877* @status: a bit-mask of transport status flags878* @dev: pointer to struct device * that represents the device879* @info: device information for use by other layers880* @pnvm_loaded: indicates PNVM was loaded881* @pm_support: set to true in start_hw if link pm is supported882* @ltr_enabled: set to true if the LTR is enabled883* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed884* @reduce_power_loaded: indicates reduced power section was loaded885* @failed_to_load_reduce_power_image: set to true if pnvm loading failed886* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.887* The user should use iwl_trans_{alloc,free}_tx_cmd.888* @dev_cmd_pool_name: name for the TX command allocation pool889* @dbgfs_dir: iwlwifi debugfs base dir for this device890* @sync_cmd_lockdep_map: lockdep map for checking sync commands891* @dbg: additional debug data, see &struct iwl_trans_debug892* @init_dram: FW initialization DMA data893* @reduced_cap_sku: reduced capability supported SKU894* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz895* @restart: restart worker data896* @restart.wk: restart worker897* @restart.mode: reset/restart error mode information898* @restart.during_reset: error occurred during previous software reset899* @trans_specific: data for the specific transport this is allocated for/with900* @request_top_reset: TOP reset was requested, used by the reset901* worker that should be scheduled (with appropriate reason)902* @do_top_reset: indication to the (PCIe) transport/context-info903* to do the TOP reset904*/905struct iwl_trans {906bool csme_own;907struct iwl_op_mode *op_mode;908const struct iwl_mac_cfg *mac_cfg;909const struct iwl_rf_cfg *cfg;910struct iwl_drv *drv;911struct iwl_trans_config conf;912enum iwl_trans_state state;913unsigned long status;914915struct device *dev;916917const struct 
iwl_trans_info info;918bool reduced_cap_sku;919bool step_urm;920921bool pm_support;922bool ltr_enabled;923u8 pnvm_loaded:1;924u8 fail_to_parse_pnvm_image:1;925u8 reduce_power_loaded:1;926u8 failed_to_load_reduce_power_image:1;927928/* The following fields are internal only */929struct kmem_cache *dev_cmd_pool;930char dev_cmd_pool_name[50];931932struct dentry *dbgfs_dir;933934#ifdef CONFIG_LOCKDEP935struct lockdep_map sync_cmd_lockdep_map;936#endif937938struct iwl_trans_debug dbg;939struct iwl_self_init_dram init_dram;940941struct {942struct delayed_work wk;943struct iwl_fw_error_dump_mode mode;944bool during_reset;945} restart;946947u8 request_top_reset:1,948do_top_reset:1;949950/* pointer to trans specific struct */951/*Ensure that this pointer will always be aligned to sizeof pointer */952char trans_specific[] __aligned(sizeof(void *));953};954955const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);956957void iwl_trans_op_mode_enter(struct iwl_trans *trans,958struct iwl_op_mode *op_mode);959960int iwl_trans_start_hw(struct iwl_trans *trans);961962void iwl_trans_op_mode_leave(struct iwl_trans *trans);963964void iwl_trans_fw_alive(struct iwl_trans *trans);965966int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,967enum iwl_ucode_type ucode_type, bool run_in_rfkill);968969void iwl_trans_stop_device(struct iwl_trans *trans);970971int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);972973int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,974bool test, bool reset);975976struct iwl_trans_dump_data *977iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,978const struct iwl_dump_sanitize_ops *sanitize_ops,979void *sanitize_ctx);980981static inline struct iwl_device_tx_cmd *982iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)983{984return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);985}986987int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);988989static inline 
void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
			   struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

/* data path: frame TX, reclaim and TX queue management */
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue);

void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush);

void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);

void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd);

bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout);

int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data);

void iwl_trans_txq_free(struct iwl_trans *trans, int queue);

int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout);

void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode);

/*
 * Enable a TX queue, building the scheduler (SCD) config from the
 * individual parameters. Aggregation is enabled iff sta_id >= 0.
 */
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

/*
 * Enable a TX queue with no station attached: sta_id -1, maximum TID,
 * default frame limit, no aggregation, SSN 0.
 */
static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

void iwl_trans_freeze_txq_timer(struct iwl_trans
*trans,
				unsigned long txqs, bool freeze);

int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);

int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);

/* direct register / periphery / device-memory access */
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords);

int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
#endif

/*
 * Byte-count wrapper around iwl_trans_read_mem(); bufsize must be a
 * whole number of 32-bit words (checked at build time when bufsize is
 * a compile-time constant).
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	\
	({							\
		if (__builtin_constant_p(bufsize))		\
			BUILD_BUG_ON((bufsize) % sizeof(u32));	\
		iwl_trans_read_mem(trans, addr, buf,		\
				   (bufsize) / sizeof(u32));	\
	})

int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt);

/*
 * Read one 32-bit word of device memory. Returns the poison pattern
 * 0xa5a5a5a5 on read failure (see iwl_trans_is_hw_error_value()).
 */
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (iwl_trans_read_mem(trans, addr, &value, 1))
		return 0xa5a5a5a5;

	return value;
}

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords);

/* Write one 32-bit word of device memory. */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);

int iwl_trans_sw_reset(struct iwl_trans *trans);

void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value);

bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);

/*
 * Wrapper adding the sparse "nic_access" lock annotation so static
 * analysis pairs a successful grab with
 * iwl_trans_release_nic_access() below.
 */
#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely(_iwl_trans_grab_nic_access(trans)))

void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);

/*
 * Queue the restart worker to recover from an error of the given
 * type. No-op if the transport is already dead or a reset is already
 * in progress.
 */
static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
					    enum iwl_fw_error_type type)
{
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;
	/* clear this on device init, not cleared on any unbind/reprobe */
	if (test_and_set_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status))
		return;

	trans->restart.mode.type = type;
	trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;

	set_bit(STATUS_RESET_PENDING, &trans->status);

	/*
	 * keep track of whether or not this happened while resetting,
	 * by the timer the worker runs it might have finished
	 */
	trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
					       &trans->status);
	queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
}

/*
 * Report a firmware error to the op mode and schedule a reset.
 * STATUS_FW_ERROR guards against double restarts caused by the same
 * erroneous firmware.
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans,
				      enum iwl_fw_error_type type)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		trans->state = IWL_TRANS_NO_FW;
		iwl_op_mode_nic_error(trans->op_mode, type);
		iwl_trans_schedule_reset(trans, type);
	}
}

/*
 * Let the op mode try to handle a SW reset itself. If the op mode has
 * no sw_reset handler, or the handler declines, clear
 * STATUS_IN_SW_RESET again. TOP-reset-by-BT must not take this path
 * (WARN and bail with the bit still set).
 */
static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
					     enum iwl_fw_error_type type)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	set_bit(STATUS_IN_SW_RESET, &trans->status);

	if (WARN_ON(type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
		return;

	if (!trans->op_mode->ops->sw_reset ||
	    !trans->op_mode->ops->sw_reset(trans->op_mode, type))
		clear_bit(STATUS_IN_SW_RESET, &trans->status);
}

/* True while the firmware is loaded and alive. */
static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

void iwl_trans_sync_nmi(struct iwl_trans *trans);

void
iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
			     u32 sw_err_bit);

/* PNVM and reduce-power image load / activation */
int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa);

int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa);

void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa);

/* True if either the internal or external ini debug config is loaded. */
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
	       trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);

/* Mark a SW reset (entered via iwl_trans_opmode_sw_reset()) as finished. */
static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
{
	clear_bit(STATUS_IN_SW_RESET, &trans->status);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_mac_cfg *mac_cfg,
				  unsigned int txcmd_size,
				  unsigned int txcmd_align);
void iwl_trans_free(struct iwl_trans *trans);

/*
 * True if @val matches the poison patterns returned for failed reads
 * (0xa5a5a5a0..0xa5a5a5af or 0x5a5a5a50..0x5a5a5a5f; the low nibble
 * is ignored).
 */
static inline bool iwl_trans_is_hw_error_value(u32 val)
{
	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}

void iwl_trans_free_restart_list(void);

/*
 * Number of RX buffer descriptors to use; doubled from the AX210
 * family onwards.
 */
static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
{
	u16 result = trans->cfg->num_rbds;

	/*
	 * Since AX210 family (So/Ty) the device cannot put multiple
	 * frames into the same buffer, so double the value for them.
	 */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 2 * result;
	return result;
}

/* Suppress the error print for the next failing host command only. */
static inline
void iwl_trans_suppress_cmd_error_once(struct iwl_trans *trans)
{
	set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status);
}

/* True if STATUS_DEVICE_ENABLED is set. */
static inline bool iwl_trans_device_enabled(struct iwl_trans *trans)
{
	return test_bit(STATUS_DEVICE_ENABLED, &trans->status);
}

/* True if the transport was marked dead (no further HW access). */
static inline bool iwl_trans_is_dead(struct iwl_trans *trans)
{
	return test_bit(STATUS_TRANS_DEAD, &trans->status);
}

/*****************************************************
 * PCIe handling
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

/* Note: order matters */
enum iwl_reset_mode {
	/* upper level modes: */
	IWL_RESET_MODE_SW_RESET,
	IWL_RESET_MODE_REPROBE,
	/* TOP reset doesn't require PCIe remove */
	IWL_RESET_MODE_TOP_RESET,
	/* PCIE level modes: */
	IWL_RESET_MODE_REMOVE_ONLY,
	IWL_RESET_MODE_RESCAN,
	IWL_RESET_MODE_FUNC_RESET,
	IWL_RESET_MODE_PROD_RESET,

	/* keep last - special backoff value */
	IWL_RESET_MODE_BACKOFF,
};

void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd);

/*
 * Internal helper: fill in trans->info during init. The cast through
 * uintptr_t strips qualifiers - presumably trans->info is declared
 * const for all other users (NOTE(review): declaration is outside
 * this view, confirm).
 */
static inline void iwl_trans_set_info(struct iwl_trans *trans,
				      struct iwl_trans_info *info)
{
	struct iwl_trans_info *write;

	write = (void *)(uintptr_t)&trans->info;
	*write = *info;
}

/* Extract the device ID from the upper 16 bits of the combined hw_id. */
static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
{
	return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}

#endif /* __iwl_trans_h__ */