GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/pcie/gen1_2/internal.h
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "pcie/iwl-context-info.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer - driver-side RX buffer descriptor
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @list: list entry for the membuffer
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
	u32 offset;
	u16 vid;
	bool invalid;
};

/* interrupt statistics */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved: reserved
 */
struct iwl_rx_completion_desc_bz {
	__le16 rbid;
	u8 flags;
	u8 reserved[1];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @write_actual: actual write pointer written to device, since we update in
 *	blocks of 8 only
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: per-queue lock
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 * @napi: NAPI struct for this queue
 * @queue_size: size of this queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	void *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: transport pointer (for configuration)
 * @rxq: the rxq to get the rb stts from
 * Return: last closed RB index
 */
static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					 struct iwl_rxq *rxq)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(*rb_stts));
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

enum iwl_pcie_fw_reset_state {
	FW_RESET_IDLE,
	FW_RESET_REQUESTED,
	FW_RESET_OK,
	FW_RESET_ERROR,
	FW_RESET_TOP_REQUESTED,
};

/**
 * enum iwl_pcie_imr_status - imr dma transfer state
 * @IMR_D2S_IDLE: default value of the dma transfer
 * @IMR_D2S_REQUESTED: dma transfer requested
 * @IMR_D2S_COMPLETED: dma transfer completed
 * @IMR_D2S_ERROR: dma transfer error
 */
enum iwl_pcie_imr_status {
	IMR_D2S_IDLE,
	IMR_D2S_REQUESTED,
	IMR_D2S_COMPLETED,
	IMR_D2S_ERROR,
};

/**
 * struct iwl_pcie_txqs - TX queues data
 *
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @txq: array of TXQ data structures representing the TXQs
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 * @bc_pool: bytecount DMA allocations pool
 * @bc_tbl_size: bytecount table size
 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
 *	(and similar usage)
 * @tfd: TFD data
 * @tfd.max_tbs: max number of buffers per TFD
 * @tfd.size: TFD size
 * @tfd.addr_size: TFD/TB address size
 */
struct iwl_pcie_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_v2: context information for v2 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml: image loader image virtual address
 * @iml_len: image loader image size
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_data: holds info about pnvm payloads allocated in DRAM
 * @reduced_tables_data: holds info about power reduced tables
 *	payloads allocated in DRAM
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @fw_mon_data: fw continuous recording data
 * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround
 *	during commands in flight
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @imr_status: imr dma state machine
 * @imr_waitq: imr wait queue for dma completion
 * @rf_name: name/version of the CRF, if any
 * @use_ict: whether or not ICT (interrupt table) is used
 * @ict_index: current ICT read index
 * @ict_tbl: ICT table pointer
 * @ict_tbl_dma: ICT table DMA address
 * @inta_mask: interrupt (INT-A) mask
 * @irq_lock: lock to synchronize IRQ handling
 * @txq_memory: TXQ allocation array
 * @sx_waitq: waitqueue for Sx transitions
 * @sx_state: state tracking Sx transitions
 * @opmode_down: indicates opmode went away
 * @num_rx_bufs: number of RX buffers to allocate/use
 * @affinity_mask: IRQ affinity mask for each RX queue
 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
 *	enable/disable
 * @fw_reset_state: state of FW reset handshake
 * @fw_reset_waitq: waitqueue for FW reset handshake
 * @is_down: indicates the NIC is down
 * @isr_stats: interrupt statistics
 * @napi_dev: (fake) netdev for NAPI registration
 * @txqs: transport tx queues data.
 * @me_present: WiAMT/CSME is detected as present (1), not present (0)
 *	or unknown (-1, so can still use it as a boolean safely)
 * @me_recheck_wk: worker to recheck WiAMT/CSME presence
 * @invalid_tx_cmd: invalid TX command buffer
 * @wait_command_queue: wait queue for sync commands
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_v2 *ctxt_info_v2;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	void *iml;
	size_t iml_len;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device *napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	/* pnvm data */
	struct iwl_dram_regions pnvm_data;
	struct iwl_dram_regions reduced_tables_data;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	u8 __iomem *hw_base;

	bool ucode_write_complete;
	enum {
		IWL_SX_INVALID = 0,
		IWL_SX_WAITING,
		IWL_SX_ERROR,
		IWL_SX_COMPLETE,
	} sx_state;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u16 num_rx_bufs;

	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	enum iwl_pcie_fw_reset_state fw_reset_state;
	wait_queue_head_t fw_reset_waitq;
	enum iwl_pcie_imr_status imr_status;
	wait_queue_head_t imr_waitq;
	char rf_name[32];

	struct iwl_pcie_txqs txqs;

	s8 me_present;
	struct delayed_work me_recheck_wk;

	struct iwl_dma_ptr invalid_tx_cmd;

	wait_queue_head_t wait_command_queue;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as a
	 * write 1 clear (W1C) register, meaning that it is cleared by
	 * writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 * Other functions: iwl_pcie_XXX
 */
void iwl_trans_pcie_free(struct iwl_trans *trans);
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
					   struct device *dev);

bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
#define _iwl_trans_pcie_grab_nic_access(trans, silent)		\
	__cond_lock(nic_access_nobh,				\
		    likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))

void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);

/*****************************************************
 * RX
 ******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
 * ICT - interrupt handling
 ******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
 * TX / HCMD
 ******************************************************/
/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
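
/*
 * Illustrative note (editorial addition, not from the original driver): if
 * tfd.max_tbs were, say, 25 on some device, IWL_TRANS_PCIE_MAX_FRAGS() would
 * evaluate to 22, i.e. the TBs left for skb frags after reserving two for
 * the TX command and header and one for possible data in the skb head. The
 * value 25 is only an assumed example.
 */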

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/*
 * Note that we put this struct *last* in the page. By doing that, we ensure
 * that no TB referencing this page can trigger the 32-bit boundary hardware
 * bug.
 */
struct iwl_tso_page_info {
	dma_addr_t dma_addr;
	struct page *next;
	refcount_t use_count;
};

#define IWL_TSO_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(struct iwl_tso_page_info))
#define IWL_TSO_PAGE_INFO(addr)	\
	((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
				      IWL_TSO_PAGE_DATA_SIZE))
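
/*
 * Illustrative sketch (editorial addition, not part of the original driver):
 * assuming PAGE_SIZE is 4096 and sizeof(struct iwl_tso_page_info) is 24 on a
 * 64-bit build, the usable data area is bytes [0, 4072) of the page and the
 * info struct occupies the final 24 bytes, so for any pointer into the page:
 *
 *	u8 *data = page_address(page);
 *	struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(data + 100);
 *	// info == (struct iwl_tso_page_info *)(data + IWL_TSO_PAGE_DATA_SIZE)
 */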

int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue);

dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
				    unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_cmd_meta *cmd_meta,
				   u8 **hdr, unsigned int hdr_room,
				   unsigned int offset);

void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_cmd_meta *cmd_meta);

static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
{
	dma_addr_t res;

	res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
	res += (unsigned long)addr & ~PAGE_MASK;

	return res;
}

static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->mac_cfg->gen2)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
}

/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
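
/*
 * Illustrative example (editorial addition): with phys == 0xfffff000 and
 * len == 0x2000, the buffer runs from 0xfffff000 up past 0x100000000, so
 * upper_32_bits() of the start (0x0) and of the end (0x1) differ and the
 * helper returns true. Callers use this to detect mappings that straddle a
 * 4 GiB boundary and handle them specially.
 */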

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 * Return: the queue index incremented, subject to wrapping
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->mac_cfg->base->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 * Return: the queue index decremented, subject to wrapping
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->mac_cfg->base->max_tfd_queue_size - 1);
}
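
/*
 * Worked example (editorial addition): these helpers rely on
 * max_tfd_queue_size being a power of two. Assuming a queue size of 256,
 * iwl_txq_inc_wrap() turns index 255 into (256 & 255) == 0, and
 * iwl_txq_dec_wrap() turns index 0 into (-1 & 255) == 255.
 */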

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

static inline void
iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
						struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	tfd->num_tbs = 0;

	iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma,
			    trans_pcie->invalid_tx_cmd.size);
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
		      u32 sta_mask, u8 tid,
		      int size, unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		 int slots_num, bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
		      int queue_size);

static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->mac_cfg->gen2) {
		struct iwl_tfh_tfd *tfh_tfd = _tfd;
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		      struct sk_buff_head *skbs, bool is_flush);
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
			       unsigned long txqs, bool freeze);
int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);

/*****************************************************
 * Error handling
 ******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
 * Helpers
 ******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans,
						   bool top_reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n",
		      top_reset ? "RESET" : "ALIVE");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		if (top_reset)
			trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE;
		else
			trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
						CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
				    : MSIX_HW_INT_CAUSES_REG_ALIVE;

		iwl_enable_hw_int_msk_msix(trans, val);

		if (top_reset)
			return;
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ":shared_IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ":queue_%d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ":default_queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ":exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ":queue_%d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common trans ops for all generations transports */
void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
			    void *buf, int dwords);
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
			 const struct iwl_dump_sanitize_ops *sanitize_ops,
			 void *sanitize_ctx);
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
			     enum iwl_d3_status *status,
			     bool test, bool reset);
int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
				  u32 mask, u32 value);
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
				 u32 *val);
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
void __releases(nic_access_nobh)
iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent,
			 const struct iwl_mac_cfg *mac_cfg,
			 u8 __iomem *hw_base, u32 hw_rev);

/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
			    const struct iwl_fw *fw,
			    const struct fw_img *img,
			    bool run_in_rfkill);
void iwl_trans_pcie_stop_device(struct iwl_trans *trans);

/* common functions that are used by gen2 transport */
void iwl_trans_pcie_gen2_op_mode_leave(struct iwl_trans *trans);
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct iwl_fw *fw,
				 const struct fw_img *img,
				 bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				struct iwl_trans_rxq_dma_data *data);

#endif /* __iwl_trans_int_pcie_h__ */