GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/iwlwifi/pcie/gen1_2/trans.c
1
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2
/*
3
* Copyright (C) 2007-2015, 2018-2024 Intel Corporation
4
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5
* Copyright (C) 2016-2017 Intel Deutschland GmbH
6
*/
7
#include <linux/pci.h>
8
#include <linux/interrupt.h>
9
#include <linux/debugfs.h>
10
#include <linux/sched.h>
11
#include <linux/bitops.h>
12
#include <linux/gfp.h>
13
#include <linux/vmalloc.h>
14
#include <linux/module.h>
15
#include <linux/wait.h>
16
#include <linux/seq_file.h>
17
#if defined(__FreeBSD__)
18
#include <sys/rman.h>
19
#include <linux/delay.h>
20
#endif
21
22
#include "iwl-drv.h"
23
#include "iwl-trans.h"
24
#include "iwl-csr.h"
25
#include "iwl-prph.h"
26
#include "iwl-scd.h"
27
#include "iwl-agn-hw.h"
28
#include "fw/error-dump.h"
29
#include "fw/dbg.h"
30
#include "fw/api/tx.h"
31
#include "fw/acpi.h"
33
#include "mei/iwl-mei.h"
34
#include "internal.h"
35
#include "iwl-fh.h"
36
#include "pcie/iwl-context-info-v2.h"
37
#include "pcie/utils.h"
38
39
/* extended range in FW SRAM */
40
#define IWL_FW_MEM_EXTENDED_START 0x40000
41
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
42
43
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
44
{
45
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
46
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
47
iwl_set_bit(trans, CSR_GP_CNTRL,
48
CSR_GP_CNTRL_REG_FLAG_SW_RESET);
49
usleep_range(10000, 20000);
50
} else {
51
iwl_set_bit(trans, CSR_RESET,
52
CSR_RESET_REG_FLAG_SW_RESET);
53
usleep_range(5000, 6000);
54
}
55
56
if (retake_ownership)
57
return iwl_pcie_prepare_card_hw(trans);
58
59
return 0;
60
}
61
62
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
63
{
64
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
65
66
if (!fw_mon->size)
67
return;
68
69
dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
70
fw_mon->physical);
71
72
fw_mon->block = NULL;
73
fw_mon->physical = 0;
74
fw_mon->size = 0;
75
}
76
77
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
78
u8 max_power)
79
{
80
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
81
void *block = NULL;
82
dma_addr_t physical = 0;
83
u32 size = 0;
84
u8 power;
85
86
if (fw_mon->size) {
87
memset(fw_mon->block, 0, fw_mon->size);
88
return;
89
}
90
91
/* need at least 2 KiB, so stop at 11 */
92
for (power = max_power; power >= 11; power--) {
93
size = BIT(power);
94
block = dma_alloc_coherent(trans->dev, size, &physical,
95
GFP_KERNEL | __GFP_NOWARN);
96
if (!block)
97
continue;
98
99
IWL_INFO(trans,
100
"Allocated 0x%08x bytes for firmware monitor.\n",
101
size);
102
break;
103
}
104
105
if (WARN_ON_ONCE(!block))
106
return;
107
108
if (power != max_power)
109
IWL_ERR(trans,
110
"Sorry - debug buffer is only %luK while you requested %luK\n",
111
(unsigned long)BIT(power - 10),
112
(unsigned long)BIT(max_power - 10));
113
114
fw_mon->block = block;
115
fw_mon->physical = physical;
116
fw_mon->size = size;
117
}
118
119
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
120
{
121
if (!max_power) {
122
/* default max_power is maximum */
123
max_power = 26;
124
} else {
125
max_power += 11;
126
}
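/*
 * Worked example (for illustration): the value passed in is the exponent
 * from the debug destination TLV (dest->size_power in
 * iwl_pcie_apply_destination()), offset by 11. A TLV value of 5 thus
 * becomes max_power = 16, i.e. a request for BIT(16) = 64 KiB, while 0
 * selects the default of 2^26 bytes (64 MiB).
 */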
127
128
if (WARN(max_power > 26,
129
"External buffer size for monitor is too big %d, check the FW TLV\n",
130
max_power))
131
return;
132
133
iwl_pcie_alloc_fw_monitor_block(trans, max_power);
134
}
135
136
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
137
{
138
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
139
((reg & 0x0000ffff) | (2 << 28)));
140
return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
141
}
142
143
static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
144
{
145
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
146
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
147
((reg & 0x0000ffff) | (3 << 28)));
148
}
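/*
 * Reading of the two helpers above: the low 16 bits of the control word
 * carry the shared-memory register address and the top nibble apparently
 * selects the operation (2 for a read, 3 for a write); for a write the
 * data register is loaded before the control word triggers the access.
 */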
149
150
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
151
{
152
if (trans->mac_cfg->base->apmg_not_supported)
153
return;
154
155
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
156
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
157
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
158
~APMG_PS_CTRL_MSK_PWR_SRC);
159
else
160
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
161
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
162
~APMG_PS_CTRL_MSK_PWR_SRC);
163
}
164
165
/* PCI registers */
166
#define PCI_CFG_RETRY_TIMEOUT 0x041
167
168
void iwl_pcie_apm_config(struct iwl_trans *trans)
169
{
170
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
171
u16 lctl;
172
u16 cap;
173
174
/*
175
* L0S states have been found to be unstable with our devices
176
* and in newer hardware they are not officially supported at
177
* all, so we must always set the L0S_DISABLED bit.
178
*/
179
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
180
181
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
182
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
183
184
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
185
trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
186
IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
187
(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
188
trans->ltr_enabled ? "En" : "Dis");
189
}
190
191
/*
192
* Start up NIC's basic functionality after it has been reset
193
* (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
194
* NOTE: This does not load uCode nor start the embedded processor
195
*/
196
static int iwl_pcie_apm_init(struct iwl_trans *trans)
197
{
198
int ret;
199
200
IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
201
202
/*
203
* Use "set_bit" below rather than "write", to preserve any hardware
204
* bits already set by default after reset.
205
*/
206
207
/* Disable L0S exit timer (platform NMI Work/Around) */
208
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
209
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
210
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
211
212
/*
213
* Disable L0s without affecting L1;
214
* don't wait for ICH L0s (ICH bug W/A)
215
*/
216
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
217
CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
218
219
/* Set FH wait threshold to maximum (HW error during stress W/A) */
220
iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
221
222
/*
223
* Enable HAP INTA (interrupt from management bus) to
224
* wake device's PCI Express link L1a -> L0s
225
*/
226
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
227
CSR_HW_IF_CONFIG_REG_HAP_WAKE);
228
229
iwl_pcie_apm_config(trans);
230
231
/* Configure analog phase-lock-loop before activating to D0A */
232
if (trans->mac_cfg->base->pll_cfg)
233
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
234
235
ret = iwl_finish_nic_init(trans);
236
if (ret)
237
return ret;
238
239
if (trans->cfg->host_interrupt_operation_mode) {
240
/*
241
* This is a bit of an abuse - this is needed for 7260 / 3160
242
* only, so we check host_interrupt_operation_mode even though
243
* what follows is not strictly related to it.
244
*
245
* Enable the oscillator to count wake up time for L1 exit. This
246
* consumes slightly more power (100uA) - but allows to be sure
247
* that we wake up from L1 on time.
248
*
249
* This looks weird: read twice the same register, discard the
250
* value, set a bit, and yet again, read that same register
251
* just to discard the value. But that's the way the hardware
252
* seems to like it.
253
*/
254
iwl_read_prph(trans, OSC_CLK);
255
iwl_read_prph(trans, OSC_CLK);
256
iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
257
iwl_read_prph(trans, OSC_CLK);
258
iwl_read_prph(trans, OSC_CLK);
259
}
260
261
/*
262
* Enable DMA clock and wait for it to stabilize.
263
*
264
* Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
265
* bits do not disable clocks. This preserves any hardware
266
* bits already set by default in "CLK_CTRL_REG" after reset.
267
*/
268
if (!trans->mac_cfg->base->apmg_not_supported) {
269
iwl_write_prph(trans, APMG_CLK_EN_REG,
270
APMG_CLK_VAL_DMA_CLK_RQT);
271
udelay(20);
272
273
/* Disable L1-Active */
274
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
275
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
276
277
/* Clear the interrupt in APMG if the NIC is in RFKILL */
278
iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
279
APMG_RTC_INT_STT_RFKILL);
280
}
281
282
set_bit(STATUS_DEVICE_ENABLED, &trans->status);
283
284
return 0;
285
}
286
287
/*
288
* Enable LP XTAL to avoid HW bug where device may consume much power if
289
* FW is not loaded after device reset. LP XTAL is disabled by default
290
* after device HW reset. Do it only if XTAL is fed by internal source.
291
* Configure device's "persistence" mode to avoid resetting XTAL again when
292
* SHRD_HW_RST occurs in S3.
293
*/
294
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
295
{
296
int ret;
297
u32 apmg_gp1_reg;
298
u32 apmg_xtal_cfg_reg;
299
u32 dl_cfg_reg;
300
301
/* Force XTAL ON */
302
iwl_trans_set_bit(trans, CSR_GP_CNTRL,
303
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
304
305
ret = iwl_trans_pcie_sw_reset(trans, true);
306
307
if (!ret)
308
ret = iwl_finish_nic_init(trans);
309
310
if (WARN_ON(ret)) {
311
/* Release XTAL ON request */
312
iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
313
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
314
return;
315
}
316
317
/*
318
* Clear "disable persistence" to avoid LP XTAL resetting when
319
* SHRD_HW_RST is applied in S3.
320
*/
321
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
322
APMG_PCIDEV_STT_VAL_PERSIST_DIS);
323
324
/*
325
* Force APMG XTAL to be active to prevent its disabling by HW
326
* caused by APMG idle state.
327
*/
328
apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
329
SHR_APMG_XTAL_CFG_REG);
330
iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
331
apmg_xtal_cfg_reg |
332
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
333
334
ret = iwl_trans_pcie_sw_reset(trans, true);
335
if (ret)
336
IWL_ERR(trans,
337
"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
338
339
/* Enable LP XTAL by indirect access through CSR */
340
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
341
iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
342
SHR_APMG_GP1_WF_XTAL_LP_EN |
343
SHR_APMG_GP1_CHICKEN_BIT_SELECT);
344
345
/* Clear delay line clock power up */
346
dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
347
iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
348
~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
349
350
/*
351
* Enable persistence mode to avoid LP XTAL resetting when
352
* SHRD_HW_RST is applied in S3.
353
*/
354
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
355
CSR_HW_IF_CONFIG_REG_PERSISTENCE);
356
357
/*
358
* Clear "initialization complete" bit to move adapter from
359
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
360
*/
361
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
362
363
/* Activates XTAL resources monitor */
364
iwl_trans_set_bit(trans, CSR_MONITOR_CFG_REG,
365
CSR_MONITOR_XTAL_RESOURCES);
366
367
/* Release XTAL ON request */
368
iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
369
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
370
udelay(10);
371
372
/* Release APMG XTAL */
373
iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
374
apmg_xtal_cfg_reg &
375
~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
376
}
377
378
void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
379
{
380
int ret;
381
382
/* stop device's busmaster DMA activity */
383
384
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
385
iwl_set_bit(trans, CSR_GP_CNTRL,
386
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
387
388
ret = iwl_poll_bits(trans, CSR_GP_CNTRL,
389
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
390
100);
391
usleep_range(10000, 20000);
392
} else {
393
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
394
395
ret = iwl_poll_bits(trans, CSR_RESET,
396
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
397
}
398
399
if (ret)
400
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
401
402
IWL_DEBUG_INFO(trans, "stop master\n");
403
}
404
405
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
406
{
407
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
408
409
if (op_mode_leave) {
410
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
411
iwl_pcie_apm_init(trans);
412
413
/* inform ME that we are leaving */
414
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)
415
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
416
APMG_PCIDEV_STT_VAL_WAKE_ME);
417
else if (trans->mac_cfg->device_family >=
418
IWL_DEVICE_FAMILY_8000) {
419
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
420
CSR_RESET_LINK_PWR_MGMT_DISABLED);
421
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
422
CSR_HW_IF_CONFIG_REG_WAKE_ME |
423
CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
424
mdelay(1);
425
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
426
CSR_RESET_LINK_PWR_MGMT_DISABLED);
427
}
428
mdelay(5);
429
}
430
431
clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
432
433
/* Stop device's DMA activity */
434
iwl_pcie_apm_stop_master(trans);
435
436
if (trans->cfg->lp_xtal_workaround) {
437
iwl_pcie_apm_lp_xtal_enable(trans);
438
return;
439
}
440
441
iwl_trans_pcie_sw_reset(trans, false);
442
443
/*
444
* Clear "initialization complete" bit to move adapter from
445
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
446
*/
447
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
448
}
449
450
static int iwl_pcie_nic_init(struct iwl_trans *trans)
451
{
452
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
453
int ret;
454
455
/* nic_init */
456
spin_lock_bh(&trans_pcie->irq_lock);
457
ret = iwl_pcie_apm_init(trans);
458
spin_unlock_bh(&trans_pcie->irq_lock);
459
460
if (ret)
461
return ret;
462
463
iwl_pcie_set_pwr(trans, false);
464
465
iwl_op_mode_nic_config(trans->op_mode);
466
467
/* Allocate the RX queue, or reset if it is already allocated */
468
ret = iwl_pcie_rx_init(trans);
469
if (ret)
470
return ret;
471
472
/* Allocate or reset and init all Tx and Command queues */
473
if (iwl_pcie_tx_init(trans)) {
474
iwl_pcie_rx_free(trans);
475
return -ENOMEM;
476
}
477
478
if (trans->mac_cfg->base->shadow_reg_enable) {
479
/* enable shadow regs in HW */
480
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
481
IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
482
}
483
484
return 0;
485
}
486
487
#define HW_READY_TIMEOUT (50)
488
489
/* Note: returns poll_bit return value, which is >= 0 if success */
490
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
491
{
492
int ret;
493
494
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
495
CSR_HW_IF_CONFIG_REG_PCI_OWN_SET);
496
497
/* See if we got it */
498
ret = iwl_poll_bits(trans, CSR_HW_IF_CONFIG_REG,
499
CSR_HW_IF_CONFIG_REG_PCI_OWN_SET,
500
HW_READY_TIMEOUT);
501
502
if (!ret)
503
iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
504
505
IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret ? " not" : "");
506
return ret;
507
}
508
509
/* Note: returns standard 0/-ERROR code */
510
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
511
{
512
int ret;
513
int iter;
514
515
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
516
517
ret = iwl_pcie_set_hw_ready(trans);
518
/* If the card is ready, exit 0 */
519
if (!ret) {
520
trans->csme_own = false;
521
return 0;
522
}
523
524
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
525
CSR_RESET_LINK_PWR_MGMT_DISABLED);
526
usleep_range(1000, 2000);
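/*
 * Rough worst case for the retry loop below: each of the 10 outer
 * iterations polls iwl_pcie_set_hw_ready() until t (advanced by 200 per
 * poll) reaches 150000, i.e. up to 750 polls with 200-1000 us sleeps in
 * between, plus an msleep(25) between outer iterations.
 */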
527
528
for (iter = 0; iter < 10; iter++) {
529
int t = 0;
530
531
/* If HW is not ready, prepare the conditions to check again */
532
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
533
CSR_HW_IF_CONFIG_REG_WAKE_ME);
534
535
do {
536
ret = iwl_pcie_set_hw_ready(trans);
537
if (!ret) {
538
trans->csme_own = false;
539
return 0;
540
}
541
542
if (iwl_mei_is_connected()) {
543
IWL_DEBUG_INFO(trans,
544
"Couldn't prepare the card but SAP is connected\n");
545
trans->csme_own = true;
546
if (trans->mac_cfg->device_family !=
547
IWL_DEVICE_FAMILY_9000)
548
IWL_ERR(trans,
549
"SAP not supported for this NIC family\n");
550
551
return -EBUSY;
552
}
553
554
usleep_range(200, 1000);
555
t += 200;
556
} while (t < 150000);
557
msleep(25);
558
}
559
560
IWL_ERR(trans, "Couldn't prepare the card\n");
561
562
return ret;
563
}
564
565
/*
566
* ucode
567
*/
568
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
569
u32 dst_addr, dma_addr_t phy_addr,
570
u32 byte_cnt)
571
{
572
iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
573
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
574
575
iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
576
dst_addr);
577
578
iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
579
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
580
581
iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
582
(iwl_get_dma_hi_addr(phy_addr)
583
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
584
585
iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
586
BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
587
BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
588
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
589
590
iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
591
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
592
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
593
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
594
}
595
596
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
597
u32 dst_addr, dma_addr_t phy_addr,
598
u32 byte_cnt)
599
{
600
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
601
int ret;
602
603
trans_pcie->ucode_write_complete = false;
604
605
if (!iwl_trans_grab_nic_access(trans))
606
return -EIO;
607
608
iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
609
byte_cnt);
610
iwl_trans_release_nic_access(trans);
611
612
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
613
trans_pcie->ucode_write_complete, 5 * HZ);
614
if (!ret) {
615
IWL_ERR(trans, "Failed to load firmware chunk!\n");
616
iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
617
return -ETIMEDOUT;
618
}
619
620
return 0;
621
}
622
623
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
624
const struct fw_desc *section)
625
{
626
u8 *v_addr;
627
dma_addr_t p_addr;
628
u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
629
int ret = 0;
630
631
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
632
section_num);
633
634
v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
635
GFP_KERNEL | __GFP_NOWARN);
636
if (!v_addr) {
637
IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
638
chunk_sz = PAGE_SIZE;
639
v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
640
&p_addr, GFP_KERNEL);
641
if (!v_addr)
642
return -ENOMEM;
643
}
644
645
for (offset = 0; offset < section->len; offset += chunk_sz) {
646
u32 copy_size, dst_addr;
647
bool extended_addr = false;
648
649
copy_size = min_t(u32, chunk_sz, section->len - offset);
650
dst_addr = section->offset + offset;
651
652
if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
653
dst_addr <= IWL_FW_MEM_EXTENDED_END)
654
extended_addr = true;
655
656
if (extended_addr)
657
iwl_set_bits_prph(trans, LMPM_CHICK,
658
LMPM_CHICK_EXTENDED_ADDR_SPACE);
659
660
memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
661
ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
662
copy_size);
663
664
if (extended_addr)
665
iwl_clear_bits_prph(trans, LMPM_CHICK,
666
LMPM_CHICK_EXTENDED_ADDR_SPACE);
667
668
if (ret) {
669
IWL_ERR(trans,
670
"Could not load the [%d] uCode section\n",
671
section_num);
672
break;
673
}
674
}
675
676
dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
677
return ret;
678
}
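/*
 * Illustration of the chunking above: if the initial allocation fails and
 * chunk_sz falls back to PAGE_SIZE, a section slightly longer than three
 * pages is copied in four DMA transfers, the last one covering only the
 * remaining bytes (copy_size = section->len - offset).
 */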
679
680
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
681
const struct fw_img *image,
682
int cpu,
683
int *first_ucode_section)
684
{
685
int shift_param;
686
int i, ret = 0, sec_num = 0x1;
687
u32 val, last_read_idx = 0;
688
689
if (cpu == 1) {
690
shift_param = 0;
691
*first_ucode_section = 0;
692
} else {
693
shift_param = 16;
694
(*first_ucode_section)++;
695
}
696
697
for (i = *first_ucode_section; i < image->num_sec; i++) {
698
last_read_idx = i;
699
700
/*
701
* CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
702
* CPU1 to CPU2.
703
* PAGING_SEPARATOR_SECTION delimiter - separate between
704
* CPU2 non paged to CPU2 paging sec.
705
*/
706
if (!image->sec[i].data ||
707
image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
708
image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
709
IWL_DEBUG_FW(trans,
710
"Break since Data not valid or Empty section, sec = %d\n",
711
i);
712
break;
713
}
714
715
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
716
if (ret)
717
return ret;
718
719
/* Notify ucode of loaded section number and status */
720
val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
721
val = val | (sec_num << shift_param);
722
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
723
724
sec_num = (sec_num << 1) | 0x1;
725
}
726
727
*first_ucode_section = last_read_idx;
728
729
iwl_enable_interrupts(trans);
730
731
if (trans->mac_cfg->gen2) {
732
if (cpu == 1)
733
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
734
0xFFFF);
735
else
736
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
737
0xFFFFFFFF);
738
} else {
739
if (cpu == 1)
740
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
741
0xFFFF);
742
else
743
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
744
0xFFFFFFFF);
745
}
746
747
return 0;
748
}
749
750
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
751
const struct fw_img *image,
752
int cpu,
753
int *first_ucode_section)
754
{
755
int i, ret = 0;
756
u32 last_read_idx = 0;
757
758
if (cpu == 1)
759
*first_ucode_section = 0;
760
else
761
(*first_ucode_section)++;
762
763
for (i = *first_ucode_section; i < image->num_sec; i++) {
764
last_read_idx = i;
765
766
/*
767
* CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
768
* CPU1 to CPU2.
769
* PAGING_SEPARATOR_SECTION delimiter - separate between
770
* CPU2 non paged to CPU2 paging sec.
771
*/
772
if (!image->sec[i].data ||
773
image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
774
image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
775
IWL_DEBUG_FW(trans,
776
"Break since Data not valid or Empty section, sec = %d\n",
777
i);
778
break;
779
}
780
781
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
782
if (ret)
783
return ret;
784
}
785
786
*first_ucode_section = last_read_idx;
787
788
return 0;
789
}
790
791
static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
792
{
793
enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
794
struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
795
&trans->dbg.fw_mon_cfg[alloc_id];
796
struct iwl_dram_data *frag;
797
798
if (!iwl_trans_dbg_ini_valid(trans))
799
return;
800
801
if (le32_to_cpu(fw_mon_cfg->buf_location) ==
802
IWL_FW_INI_LOCATION_SRAM_PATH) {
803
IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
804
/* set sram monitor by enabling bit 7 */
805
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
806
CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
807
808
return;
809
}
810
811
if (le32_to_cpu(fw_mon_cfg->buf_location) !=
812
IWL_FW_INI_LOCATION_DRAM_PATH ||
813
!trans->dbg.fw_mon_ini[alloc_id].num_frags)
814
return;
815
816
frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
817
818
IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
819
alloc_id);
820
821
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
822
frag->physical >> MON_BUFF_SHIFT_VER2);
823
iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
824
(frag->physical + frag->size - 256) >>
825
MON_BUFF_SHIFT_VER2);
826
}
827
828
void iwl_pcie_apply_destination(struct iwl_trans *trans)
829
{
830
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
831
const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
832
int i;
833
834
if (iwl_trans_dbg_ini_valid(trans)) {
835
iwl_pcie_apply_destination_ini(trans);
836
return;
837
}
838
839
IWL_INFO(trans, "Applying debug destination %s\n",
840
get_fw_dbg_mode_string(dest->monitor_mode));
841
842
if (dest->monitor_mode == EXTERNAL_MODE)
843
iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
844
else
845
IWL_WARN(trans, "PCI should have external buffer debug\n");
846
847
for (i = 0; i < trans->dbg.n_dest_reg; i++) {
848
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
849
u32 val = le32_to_cpu(dest->reg_ops[i].val);
850
851
switch (dest->reg_ops[i].op) {
852
case CSR_ASSIGN:
853
iwl_write32(trans, addr, val);
854
break;
855
case CSR_SETBIT:
856
iwl_set_bit(trans, addr, BIT(val));
857
break;
858
case CSR_CLEARBIT:
859
iwl_clear_bit(trans, addr, BIT(val));
860
break;
861
case PRPH_ASSIGN:
862
iwl_write_prph(trans, addr, val);
863
break;
864
case PRPH_SETBIT:
865
iwl_set_bits_prph(trans, addr, BIT(val));
866
break;
867
case PRPH_CLEARBIT:
868
iwl_clear_bits_prph(trans, addr, BIT(val));
869
break;
870
case PRPH_BLOCKBIT:
871
if (iwl_read_prph(trans, addr) & BIT(val)) {
872
IWL_ERR(trans,
873
"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
874
val, addr);
875
goto monitor;
876
}
877
break;
878
default:
879
IWL_ERR(trans, "FW debug - unknown OP %d\n",
880
dest->reg_ops[i].op);
881
break;
882
}
883
}
884
885
monitor:
886
if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
887
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
888
fw_mon->physical >> dest->base_shift);
889
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
890
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
891
(fw_mon->physical + fw_mon->size -
892
256) >> dest->end_shift);
893
else
894
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
895
(fw_mon->physical + fw_mon->size) >>
896
dest->end_shift);
897
}
898
}
899
900
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
901
const struct fw_img *image)
902
{
903
int ret = 0;
904
int first_ucode_section;
905
906
IWL_DEBUG_FW(trans, "working with %s CPU\n",
907
image->is_dual_cpus ? "Dual" : "Single");
908
909
/* load to FW the binary non secured sections of CPU1 */
910
ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
911
if (ret)
912
return ret;
913
914
if (image->is_dual_cpus) {
915
/* set CPU2 header address */
916
iwl_write_prph(trans,
917
LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
918
LMPM_SECURE_CPU2_HDR_MEM_SPACE);
919
920
/* load to FW the binary sections of CPU2 */
921
ret = iwl_pcie_load_cpu_sections(trans, image, 2,
922
&first_ucode_section);
923
if (ret)
924
return ret;
925
}
926
927
if (iwl_pcie_dbg_on(trans))
928
iwl_pcie_apply_destination(trans);
929
930
iwl_enable_interrupts(trans);
931
932
/* release CPU reset */
933
iwl_write32(trans, CSR_RESET, 0);
934
935
return 0;
936
}
937
938
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
939
const struct fw_img *image)
940
{
941
int ret = 0;
942
int first_ucode_section;
943
944
IWL_DEBUG_FW(trans, "working with %s CPU\n",
945
image->is_dual_cpus ? "Dual" : "Single");
946
947
if (iwl_pcie_dbg_on(trans))
948
iwl_pcie_apply_destination(trans);
949
950
IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
951
iwl_read_prph(trans, WFPM_GP2));
952
953
/*
954
* Set default value. On resume reading the values that were
955
* zeroed can provide debug data on the resume flow.
956
* This is for debugging only and has no functional impact.
957
*/
958
iwl_write_prph(trans, WFPM_GP2, 0x01010101);
959
960
/* configure the ucode to be ready to get the secured image */
961
/* release CPU reset */
962
iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
963
964
/* load to FW the binary Secured sections of CPU1 */
965
ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
966
&first_ucode_section);
967
if (ret)
968
return ret;
969
970
/* load to FW the binary sections of CPU2 */
971
return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
972
&first_ucode_section);
973
}
974
975
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
976
{
977
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
978
bool hw_rfkill = iwl_is_rfkill_set(trans);
979
bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
980
bool report;
981
982
if (hw_rfkill) {
983
set_bit(STATUS_RFKILL_HW, &trans->status);
984
set_bit(STATUS_RFKILL_OPMODE, &trans->status);
985
} else {
986
clear_bit(STATUS_RFKILL_HW, &trans->status);
987
if (trans_pcie->opmode_down)
988
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
989
}
990
991
report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
992
993
if (prev != report)
994
iwl_trans_pcie_rf_kill(trans, report, false);
995
996
return hw_rfkill;
997
}
998
999
struct iwl_causes_list {
1000
u16 mask_reg;
1001
u8 bit;
1002
u8 addr;
1003
};
1004
1005
#define IWL_CAUSE(reg, mask) \
1006
{ \
1007
.mask_reg = reg, \
1008
.bit = ilog2(mask), \
1009
.addr = ilog2(mask) + \
1010
((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \
1011
(reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 : \
1012
0xffff), /* causes overflow warning */ \
1013
}
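/*
 * Worked example (hypothetical entry, for illustration only):
 * IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, BIT(5)) expands to
 * { .mask_reg = CSR_MSIX_HW_INT_MASK_AD, .bit = 5, .addr = 21 },
 * i.e. the cause's bit index selects both the bit to unmask in the mask
 * register and, shifted by +16 for HW causes (-16 for FH causes), the
 * byte written through CSR_MSIX_IVAR() in iwl_pcie_map_list().
 */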
1014
1015
static const struct iwl_causes_list causes_list_common[] = {
1016
IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
1017
IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
1018
IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
1019
IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
1020
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
1021
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
1022
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
1023
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
1024
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
1025
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
1026
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
1027
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
1028
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
1029
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
1030
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
1031
};
1032
1033
static const struct iwl_causes_list causes_list_pre_bz[] = {
1034
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
1035
};
1036
1037
static const struct iwl_causes_list causes_list_bz[] = {
1038
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
1039
};
1040
1041
static void iwl_pcie_map_list(struct iwl_trans *trans,
1042
const struct iwl_causes_list *causes,
1043
int arr_size, int val)
1044
{
1045
int i;
1046
1047
for (i = 0; i < arr_size; i++) {
1048
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
1049
iwl_clear_bit(trans, causes[i].mask_reg,
1050
BIT(causes[i].bit));
1051
}
1052
}
1053
1054
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
1055
{
1056
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1057
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
1058
/*
1059
* Access all non RX causes and map them to the default irq.
1060
* In case we are missing at least one interrupt vector,
1061
* the first interrupt vector will serve non-RX and FBQ causes.
1062
*/
1063
iwl_pcie_map_list(trans, causes_list_common,
1064
ARRAY_SIZE(causes_list_common), val);
1065
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1066
iwl_pcie_map_list(trans, causes_list_bz,
1067
ARRAY_SIZE(causes_list_bz), val);
1068
else
1069
iwl_pcie_map_list(trans, causes_list_pre_bz,
1070
ARRAY_SIZE(causes_list_pre_bz), val);
1071
}
1072
1073
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
1074
{
1075
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1076
u32 offset =
1077
trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
1078
u32 val, idx;
1079
1080
/*
1081
* The first RX queue - fallback queue, which is designated for
1082
* management frame, command responses etc, is always mapped to the
1083
* first interrupt vector. The other RX queues are mapped to
1084
* the other (N - 2) interrupt vectors.
1085
*/
1086
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
1087
for (idx = 1; idx < trans->info.num_rxqs; idx++) {
1088
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
1089
MSIX_FH_INT_CAUSES_Q(idx - offset));
1090
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
1091
}
1092
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
1093
1094
val = MSIX_FH_INT_CAUSES_Q(0);
1095
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
1096
val |= MSIX_NON_AUTO_CLEAR_CAUSE;
1097
iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
1098
1099
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
1100
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
1101
}
1102
1103
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
1104
{
1105
struct iwl_trans *trans = trans_pcie->trans;
1106
1107
if (!trans_pcie->msix_enabled) {
1108
if (trans->mac_cfg->mq_rx_supported &&
1109
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1110
iwl_write_umac_prph(trans, UREG_CHICK,
1111
UREG_CHICK_MSI_ENABLE);
1112
return;
1113
}
1114
/*
1115
* The IVAR table needs to be configured again after reset,
1116
* but if the device is disabled, we can't write to
1117
* prph.
1118
*/
1119
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1120
iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
1121
1122
/*
1123
* Each cause from the causes list above and the RX causes is
1124
* represented as a byte in the IVAR table. The first nibble
1125
* represents the bound interrupt vector of the cause, the second
1126
* represents no auto clear for this cause. This will be set if its
1127
* interrupt vector is bound to serve other causes.
1128
*/
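/*
 * For instance, iwl_pcie_map_non_rx_causes() below writes
 * (def_irq | MSIX_NON_AUTO_CLEAR_CAUSE) into each cause's IVAR byte, so
 * the default vector number lands in the low nibble and the "no auto
 * clear" flag in the high nibble.
 */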
1129
iwl_pcie_map_rx_causes(trans);
1130
1131
iwl_pcie_map_non_rx_causes(trans);
1132
}
1133
1134
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
1135
{
1136
struct iwl_trans *trans = trans_pcie->trans;
1137
1138
iwl_pcie_conf_msix_hw(trans_pcie);
1139
1140
if (!trans_pcie->msix_enabled)
1141
return;
1142
1143
trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1144
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1145
trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1146
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
1147
}
1148
1149
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
1150
{
1151
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1152
1153
lockdep_assert_held(&trans_pcie->mutex);
1154
1155
if (trans_pcie->is_down)
1156
return;
1157
1158
trans_pcie->is_down = true;
1159
1160
/* tell the device to stop sending interrupts */
1161
iwl_disable_interrupts(trans);
1162
1163
/* device going down, Stop using ICT table */
1164
iwl_pcie_disable_ict(trans);
1165
1166
/*
1167
* If a HW restart happens during firmware loading,
1168
* then the firmware loading might call this function
1169
* and later it might be called again due to the
1170
* restart. So don't process again if the device is
1171
* already dead.
1172
*/
1173
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1174
IWL_DEBUG_INFO(trans,
1175
"DEVICE_ENABLED bit was set and is now cleared\n");
1176
if (!from_irq)
1177
iwl_pcie_synchronize_irqs(trans);
1178
iwl_pcie_rx_napi_sync(trans);
1179
iwl_pcie_tx_stop(trans);
1180
iwl_pcie_rx_stop(trans);
1181
1182
/* Power-down device's busmaster DMA clocks */
1183
if (!trans->mac_cfg->base->apmg_not_supported) {
1184
iwl_write_prph(trans, APMG_CLK_DIS_REG,
1185
APMG_CLK_VAL_DMA_CLK_RQT);
1186
udelay(5);
1187
}
1188
}
1189
1190
/* Make sure (redundant) we've released our request to stay awake */
1191
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1192
iwl_clear_bit(trans, CSR_GP_CNTRL,
1193
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
1194
else
1195
iwl_clear_bit(trans, CSR_GP_CNTRL,
1196
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1197
1198
/* Stop the device, and put it in low power state */
1199
iwl_pcie_apm_stop(trans, false);
1200
1201
/* re-take ownership to prevent other users from stealing the device */
1202
iwl_trans_pcie_sw_reset(trans, true);
1203
1204
/*
1205
* Upon stop, the IVAR table gets erased, so msi-x won't
1206
* work. This causes a bug in RF-KILL flows, since the interrupt
1207
* that enables radio won't fire on the correct irq, and the
1208
* driver won't be able to handle the interrupt.
1209
* Configure the IVAR table again after reset.
1210
*/
1211
iwl_pcie_conf_msix_hw(trans_pcie);
1212
1213
/*
1214
* Upon stop, the APM issues an interrupt if HW RF kill is set.
1215
* This is a bug in certain versions of the hardware.
1216
* Certain devices also keep sending HW RF kill interrupt all
1217
* the time, unless the interrupt is ACKed even if the interrupt
1218
* should be masked. Re-ACK all the interrupts here.
1219
*/
1220
iwl_disable_interrupts(trans);
1221
1222
/* clear all status bits */
1223
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1224
clear_bit(STATUS_INT_ENABLED, &trans->status);
1225
clear_bit(STATUS_TPOWER_PMI, &trans->status);
1226
1227
/*
1228
* Even if we stop the HW, we still want the RF kill
1229
* interrupt
1230
*/
1231
iwl_enable_rfkill_int(trans);
1232
}
1233
1234
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
1235
{
1236
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1237
1238
if (trans_pcie->msix_enabled) {
1239
int i;
1240
1241
for (i = 0; i < trans_pcie->alloc_vecs; i++)
1242
synchronize_irq(trans_pcie->msix_entries[i].vector);
1243
} else {
1244
synchronize_irq(trans_pcie->pci_dev->irq);
1245
}
1246
}
1247
1248
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1249
const struct iwl_fw *fw,
1250
const struct fw_img *img,
1251
bool run_in_rfkill)
1252
{
1253
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1254
bool hw_rfkill;
1255
int ret;
1256
1257
/* This may fail if AMT took ownership of the device */
1258
if (iwl_pcie_prepare_card_hw(trans)) {
1259
IWL_WARN(trans, "Exit HW not ready\n");
1260
return -EIO;
1261
}
1262
1263
iwl_enable_rfkill_int(trans);
1264
1265
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1266
1267
/*
1268
* We enabled the RF-Kill interrupt and the handler may very
1269
* well be running. Disable the interrupts to make sure no other
1270
* interrupt can be fired.
1271
*/
1272
iwl_disable_interrupts(trans);
1273
1274
/* Make sure it finished running */
1275
iwl_pcie_synchronize_irqs(trans);
1276
1277
mutex_lock(&trans_pcie->mutex);
1278
1279
/* If platform's RF_KILL switch is NOT set to KILL */
1280
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1281
if (hw_rfkill && !run_in_rfkill) {
1282
ret = -ERFKILL;
1283
goto out;
1284
}
1285
1286
/* Someone called stop_device, don't try to start_fw */
1287
if (trans_pcie->is_down) {
1288
IWL_WARN(trans,
1289
"Can't start_fw since the HW hasn't been started\n");
1290
ret = -EIO;
1291
goto out;
1292
}
1293
1294
/* make sure rfkill handshake bits are cleared */
1295
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1296
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1297
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1298
1299
/* clear (again), then enable host interrupts */
1300
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1301
1302
ret = iwl_pcie_nic_init(trans);
1303
if (ret) {
1304
IWL_ERR(trans, "Unable to init nic\n");
1305
goto out;
1306
}
1307
1308
/*
1309
* Now, we load the firmware and don't want to be interrupted, even
1310
* by the RF-Kill interrupt (hence mask all the interrupt besides the
1311
* FH_TX interrupt which is needed to load the firmware). If the
1312
* RF-Kill switch is toggled, we will find out after having loaded
1313
* the firmware and return the proper value to the caller.
1314
*/
1315
iwl_enable_fw_load_int(trans);
1316
1317
/* really make sure rfkill handshake bits are cleared */
1318
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1319
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1320
1321
/* Load the given image to the HW */
1322
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1323
ret = iwl_pcie_load_given_ucode_8000(trans, img);
1324
else
1325
ret = iwl_pcie_load_given_ucode(trans, img);
1326
1327
/* re-check RF-Kill state since we may have missed the interrupt */
1328
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1329
if (hw_rfkill && !run_in_rfkill)
1330
ret = -ERFKILL;
1331
1332
out:
1333
mutex_unlock(&trans_pcie->mutex);
1334
return ret;
1335
}
1336
1337
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
1338
{
1339
iwl_pcie_reset_ict(trans);
1340
iwl_pcie_tx_start(trans);
1341
}
1342
1343
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1344
bool was_in_rfkill)
1345
{
1346
bool hw_rfkill;
1347
1348
/*
1349
* Check again since the RF kill state may have changed while
1350
* all the interrupts were disabled; in that case we couldn't
1351
* receive the RF kill interrupt and update the state in the
1352
* op_mode.
1353
* Don't call the op_mode if the rfkill state hasn't changed.
1354
* This allows the op_mode to call stop_device from the rfkill
1355
* notification without endless recursion. Under very rare
1356
* circumstances, we might have a small recursion if the rfkill
1357
* state changed exactly now while we were called from stop_device.
1358
* This is very unlikely but can happen and is supported.
1359
*/
1360
hw_rfkill = iwl_is_rfkill_set(trans);
1361
if (hw_rfkill) {
1362
set_bit(STATUS_RFKILL_HW, &trans->status);
1363
set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1364
} else {
1365
clear_bit(STATUS_RFKILL_HW, &trans->status);
1366
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1367
}
1368
if (hw_rfkill != was_in_rfkill)
1369
iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
1370
}
1371
1372
void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1373
{
1374
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1375
bool was_in_rfkill;
1376
1377
iwl_op_mode_time_point(trans->op_mode,
1378
IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
1379
NULL);
1380
1381
mutex_lock(&trans_pcie->mutex);
1382
trans_pcie->opmode_down = true;
1383
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1384
_iwl_trans_pcie_stop_device(trans, false);
1385
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
1386
mutex_unlock(&trans_pcie->mutex);
1387
}
1388
1389
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
1390
{
1391
struct iwl_trans_pcie __maybe_unused *trans_pcie =
1392
IWL_TRANS_GET_PCIE_TRANS(trans);
1393
1394
lockdep_assert_held(&trans_pcie->mutex);
1395
1396
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
1397
state ? "disabled" : "enabled");
1398
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
1399
!WARN_ON(trans->mac_cfg->gen2))
1400
_iwl_trans_pcie_stop_device(trans, from_irq);
1401
}
1402
1403
static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
1404
bool test, bool reset)
1405
{
1406
iwl_disable_interrupts(trans);
1407
1408
/*
1409
* in testing mode, the host stays awake and the
1410
* hardware won't be reset (not even partially)
1411
*/
1412
if (test)
1413
return;
1414
1415
iwl_pcie_disable_ict(trans);
1416
1417
iwl_pcie_synchronize_irqs(trans);
1418
1419
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1420
iwl_clear_bit(trans, CSR_GP_CNTRL,
1421
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
1422
iwl_clear_bit(trans, CSR_GP_CNTRL,
1423
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
1424
} else {
1425
iwl_clear_bit(trans, CSR_GP_CNTRL,
1426
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1427
iwl_clear_bit(trans, CSR_GP_CNTRL,
1428
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1429
}
1430
1431
if (reset) {
1432
/*
1433
* reset TX queues -- some of their registers reset during S3
1434
* so if we don't reset everything here the D3 image would try
1435
* to execute some invalid memory upon resume
1436
*/
1437
iwl_trans_pcie_tx_reset(trans);
1438
}
1439
1440
iwl_pcie_set_pwr(trans, true);
1441
}
1442
1443
static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
1444
{
1445
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1446
int ret;
1447
1448
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
1449
return 0;
1450
1451
trans_pcie->sx_state = IWL_SX_WAITING;
1452
1453
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
1454
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
1455
suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
1456
UREG_DOORBELL_TO_ISR6_RESUME);
1457
else
1458
iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
1459
suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
1460
CSR_IPC_SLEEP_CONTROL_RESUME);
1461
1462
ret = wait_event_timeout(trans_pcie->sx_waitq,
1463
trans_pcie->sx_state != IWL_SX_WAITING,
1464
2 * HZ);
1465
if (!ret) {
1466
IWL_ERR(trans, "Timeout %s D3\n",
1467
suspend ? "entering" : "exiting");
1468
ret = -ETIMEDOUT;
1469
} else {
1470
ret = 0;
1471
}
1472
1473
if (trans_pcie->sx_state == IWL_SX_ERROR) {
1474
IWL_ERR(trans, "FW error while %s D3\n",
1475
suspend ? "entering" : "exiting");
1476
ret = -EIO;
1477
}
1478
1479
/* Invalidate it toward next suspend or resume */
1480
trans_pcie->sx_state = IWL_SX_INVALID;
1481
1482
return ret;
1483
}
1484
1485
int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
1486
{
1487
int ret;
1488
1489
if (!reset)
1490
/* Enable persistence mode to avoid reset */
1491
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
1492
CSR_HW_IF_CONFIG_REG_PERSISTENCE);
1493
1494
ret = iwl_pcie_d3_handshake(trans, true);
1495
if (ret)
1496
return ret;
1497
1498
iwl_pcie_d3_complete_suspend(trans, test, reset);
1499
1500
return 0;
1501
}
1502
1503
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1504
enum iwl_d3_status *status,
1505
bool test, bool reset)
1506
{
1507
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1508
u32 val;
1509
int ret;
1510
1511
if (test) {
1512
iwl_enable_interrupts(trans);
1513
*status = IWL_D3_STATUS_ALIVE;
1514
ret = 0;
1515
goto out;
1516
}
1517
1518
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1519
iwl_set_bit(trans, CSR_GP_CNTRL,
1520
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
1521
else
1522
iwl_set_bit(trans, CSR_GP_CNTRL,
1523
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1524
1525
ret = iwl_finish_nic_init(trans);
1526
if (ret)
1527
return ret;
1528
1529
/*
1530
* Reconfigure IVAR table in case of MSIX or reset ict table in
1531
* MSI mode since HW reset erased it.
1532
* Also enables interrupts - none will happen as
1533
* the device doesn't know we're waking it up, only when
1534
* the opmode actually tells it after this call.
1535
*/
1536
iwl_pcie_conf_msix_hw(trans_pcie);
1537
if (!trans_pcie->msix_enabled)
1538
iwl_pcie_reset_ict(trans);
1539
iwl_enable_interrupts(trans);
1540
1541
iwl_pcie_set_pwr(trans, false);
1542
1543
if (!reset) {
1544
iwl_clear_bit(trans, CSR_GP_CNTRL,
1545
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1546
} else {
1547
iwl_trans_pcie_tx_reset(trans);
1548
1549
ret = iwl_pcie_rx_init(trans);
1550
if (ret) {
1551
IWL_ERR(trans,
1552
"Failed to resume the device (RX reset)\n");
1553
return ret;
1554
}
1555
}
1556
1557
IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
1558
iwl_read_umac_prph(trans, WFPM_GP2));
1559
1560
val = iwl_read32(trans, CSR_RESET);
1561
if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
1562
*status = IWL_D3_STATUS_RESET;
1563
else
1564
*status = IWL_D3_STATUS_ALIVE;
1565
1566
out:
1567
if (*status == IWL_D3_STATUS_ALIVE)
1568
ret = iwl_pcie_d3_handshake(trans, false);
1569
else
1570
trans->state = IWL_TRANS_NO_FW;
1571
1572
return ret;
1573
}
1574
1575
static void
1576
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1577
struct iwl_trans *trans,
1578
const struct iwl_mac_cfg *mac_cfg,
1579
struct iwl_trans_info *info)
1580
{
1581
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1582
int max_irqs, num_irqs, i, ret;
1583
u16 pci_cmd;
1584
u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
1585
1586
if (!mac_cfg->mq_rx_supported)
1587
goto enable_msi;
1588
1589
if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
1590
max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
1591
1592
max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
1593
for (i = 0; i < max_irqs; i++)
1594
trans_pcie->msix_entries[i].entry = i;
1595
1596
num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1597
MSIX_MIN_INTERRUPT_VECTORS,
1598
max_irqs);
1599
if (num_irqs < 0) {
1600
IWL_DEBUG_INFO(trans,
1601
"Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
1602
num_irqs);
1603
goto enable_msi;
1604
}
1605
trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
1606
1607
IWL_DEBUG_INFO(trans,
1608
"MSI-X enabled. %d interrupt vectors were allocated\n",
1609
num_irqs);
1610
1611
/*
1612
* In case the OS provides fewer interrupts than requested, different
1613
* causes will share the same interrupt vector as follows:
1614
* One interrupt less: non rx causes shared with FBQ.
1615
* Two interrupts less: non rx causes shared with FBQ and RSS.
1616
* More than two interrupts: we will use fewer RSS queues.
1617
*/
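/*
 * Example with illustrative numbers: with max_irqs = 4 but only
 * num_irqs = 2 granted, the first branch below is taken, info->num_rxqs
 * becomes 3, and the first vector is shared by the non-RX causes, the
 * FBQ and the first RSS queue, leaving the second vector for the
 * remaining RSS queue.
 */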
1618
if (num_irqs <= max_irqs - 2) {
1619
info->num_rxqs = num_irqs + 1;
1620
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1621
IWL_SHARED_IRQ_FIRST_RSS;
1622
} else if (num_irqs == max_irqs - 1) {
1623
info->num_rxqs = num_irqs;
1624
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1625
} else {
1626
info->num_rxqs = num_irqs - 1;
1627
}
1628
1629
IWL_DEBUG_INFO(trans,
1630
"MSI-X enabled with rx queues %d, vec mask 0x%x\n",
1631
info->num_rxqs, trans_pcie->shared_vec_mask);
1632
1633
WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);
1634
1635
trans_pcie->alloc_vecs = num_irqs;
1636
trans_pcie->msix_enabled = true;
1637
return;
1638
1639
enable_msi:
1640
info->num_rxqs = 1;
1641
ret = pci_enable_msi(pdev);
1642
if (ret) {
1643
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1644
/* enable rfkill interrupt: hw bug w/a */
1645
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1646
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1647
pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1648
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1649
}
1650
}
1651
}
1652
1653
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
1654
struct iwl_trans_info *info)
1655
{
1656
#if defined(CONFIG_SMP)
1657
int iter_rx_q, i, ret, cpu, offset;
1658
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1659
1660
i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1661
iter_rx_q = info->num_rxqs - 1 + i;
1662
offset = 1 + i;
1663
for (; i < iter_rx_q ; i++) {
1664
/*
1665
* Get the cpu prior to the place to search
1666
* (i.e. return will be > i - 1).
1667
*/
1668
cpu = cpumask_next(i - offset, cpu_online_mask);
1669
cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1670
ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1671
&trans_pcie->affinity_mask[i]);
1672
if (ret)
1673
IWL_ERR(trans_pcie->trans,
1674
"Failed to set affinity mask for IRQ %d\n",
1675
trans_pcie->msix_entries[i].vector);
1676
}
1677
#endif
1678
}
1679
1680
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1681
struct iwl_trans_pcie *trans_pcie,
1682
struct iwl_trans_info *info)
1683
{
1684
int i;
1685
1686
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1687
int ret;
1688
struct msix_entry *msix_entry;
1689
const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1690
1691
if (!qname)
1692
return -ENOMEM;
1693
1694
msix_entry = &trans_pcie->msix_entries[i];
1695
ret = devm_request_threaded_irq(&pdev->dev,
1696
msix_entry->vector,
1697
iwl_pcie_msix_isr,
1698
(i == trans_pcie->def_irq) ?
1699
iwl_pcie_irq_msix_handler :
1700
iwl_pcie_irq_rx_msix_handler,
1701
IRQF_SHARED,
1702
qname,
1703
msix_entry);
1704
if (ret) {
1705
IWL_ERR(trans_pcie->trans,
1706
"Error allocating IRQ %d\n", i);
1707
1708
return ret;
1709
}
1710
}
1711
iwl_pcie_irq_set_affinity(trans_pcie->trans, info);
1712
1713
return 0;
1714
}
1715
1716
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1717
{
1718
u32 hpm, wprot;
1719
1720
switch (trans->mac_cfg->device_family) {
1721
case IWL_DEVICE_FAMILY_9000:
1722
wprot = PREG_PRPH_WPROT_9000;
1723
break;
1724
case IWL_DEVICE_FAMILY_22000:
1725
wprot = PREG_PRPH_WPROT_22000;
1726
break;
1727
default:
1728
return 0;
1729
}
1730
1731
hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1732
if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
1733
u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1734
1735
if (wprot_val & PREG_WFPM_ACCESS) {
1736
IWL_ERR(trans,
1737
"Error, can not clear persistence bit\n");
1738
return -EPERM;
1739
}
1740
iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1741
hpm & ~PERSISTENCE_BIT);
1742
}
1743
1744
return 0;
1745
}
1746
1747
static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1748
{
1749
int ret;
1750
1751
ret = iwl_finish_nic_init(trans);
1752
if (ret < 0)
1753
return ret;
1754
1755
iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1756
HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1757
udelay(20);
1758
iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1759
HPM_HIPM_GEN_CFG_CR_PG_EN |
1760
HPM_HIPM_GEN_CFG_CR_SLP_EN);
1761
udelay(20);
1762
iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1763
HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1764
1765
return iwl_trans_pcie_sw_reset(trans, true);
1766
}
1767
1768
int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1769
{
1770
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1771
int err;
1772
1773
lockdep_assert_held(&trans_pcie->mutex);
1774
1775
err = iwl_pcie_prepare_card_hw(trans);
1776
if (err) {
1777
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1778
return err;
1779
}
1780
1781
err = iwl_trans_pcie_clear_persistence_bit(trans);
1782
if (err)
1783
return err;
1784
1785
err = iwl_trans_pcie_sw_reset(trans, true);
1786
if (err)
1787
return err;
1788
1789
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1790
trans->mac_cfg->integrated) {
1791
err = iwl_pcie_gen2_force_power_gating(trans);
1792
if (err)
1793
return err;
1794
}
1795
1796
err = iwl_pcie_apm_init(trans);
1797
if (err)
1798
return err;
1799
1800
iwl_pcie_init_msix(trans_pcie);
1801
1802
/* From now on, the op_mode will be kept updated about RF kill state */
1803
iwl_enable_rfkill_int(trans);
1804
1805
trans_pcie->opmode_down = false;
1806
1807
/* Set is_down to false here so that...*/
1808
trans_pcie->is_down = false;
1809
1810
/* ...rfkill can call stop_device and set it false if needed */
1811
iwl_pcie_check_hw_rf_kill(trans);
1812
1813
return 0;
1814
}
1815
1816
int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1817
{
1818
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1819
int ret;
1820
1821
mutex_lock(&trans_pcie->mutex);
1822
ret = _iwl_trans_pcie_start_hw(trans);
1823
mutex_unlock(&trans_pcie->mutex);
1824
1825
return ret;
1826
}
1827
1828
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1829
{
1830
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1831
1832
mutex_lock(&trans_pcie->mutex);
1833
1834
/* disable interrupts - don't enable HW RF kill interrupt */
1835
iwl_disable_interrupts(trans);
1836
1837
iwl_pcie_apm_stop(trans, true);
1838
1839
iwl_disable_interrupts(trans);
1840
1841
iwl_pcie_disable_ict(trans);
1842
1843
mutex_unlock(&trans_pcie->mutex);
1844
1845
iwl_pcie_synchronize_irqs(trans);
1846
}
1847
1848
#if defined(__linux__)
1849
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1850
{
1851
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1852
}
1853
1854
void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1855
{
1856
writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1857
}
1858
1859
u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1860
{
1861
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1862
}
1863
#elif defined(__FreeBSD__)
1864
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1865
{
1866
bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);
1867
}
1868
1869
void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1870
{
1871
bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);
1872
}
1873
1874
u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1875
{
1876
u32 v;
1877
1878
v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs);
1879
return (v);
1880
}
1881
#endif
1882
1883
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0x00FFFFFF;
	else
		return 0x000FFFFF;
}

u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & mask) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans->conf.rx_buf_size);
}

void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
					   struct device *dev)
{
	u8 i;
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;

	/* free DRAM payloads */
	for (i = 0; i < dram_regions->n_regions; i++) {
		dma_free_coherent(dev, dram_regions->drams[i].size,
				  dram_regions->drams[i].block,
				  dram_regions->drams[i].physical);
	}
	dram_regions->n_regions = 0;

	/* free DRAM addresses array */
	if (desc_dram->block) {
		dma_free_coherent(dev, desc_dram->size,
				  desc_dram->block,
				  desc_dram->physical);
	}
	memset(desc_dram, 0, sizeof(*desc_dram));
}

static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);
}

static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_cmd_header_wide bad_cmd = {
		.cmd = INVALID_WR_PTR_CMD,
		.group_id = DEBUG_GROUP,
		.sequence = cpu_to_le16(0xffff),
		.length = cpu_to_le16(0),
		.version = 0,
	};
	int ret;

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,
				     sizeof(bad_cmd));
	if (ret)
		return ret;
	memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
	return 0;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
1973
{
1974
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1975
int i;
1976
1977
iwl_pcie_synchronize_irqs(trans);
1978
1979
if (trans->mac_cfg->gen2)
1980
iwl_txq_gen2_tx_free(trans);
1981
else
1982
iwl_pcie_tx_free(trans);
1983
iwl_pcie_rx_free(trans);
1984
1985
if (trans_pcie->rba.alloc_wq) {
1986
destroy_workqueue(trans_pcie->rba.alloc_wq);
1987
trans_pcie->rba.alloc_wq = NULL;
1988
}
1989
1990
if (trans_pcie->msix_enabled) {
1991
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1992
irq_set_affinity_hint(
1993
trans_pcie->msix_entries[i].vector,
1994
NULL);
1995
}
1996
1997
trans_pcie->msix_enabled = false;
1998
} else {
1999
iwl_pcie_free_ict(trans);
2000
}
2001
2002
free_netdev(trans_pcie->napi_dev);
2003
2004
iwl_pcie_free_invalid_tx_cmd(trans);
2005
2006
iwl_pcie_free_fw_monitor(trans);
2007
2008
iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
2009
trans->dev);
2010
iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
2011
trans->dev);
2012
2013
mutex_destroy(&trans_pcie->mutex);
2014
2015
#ifdef CONFIG_INET
2016
if (trans_pcie->txqs.tso_hdr_page) {
2017
for_each_possible_cpu(i) {
2018
struct iwl_tso_hdr_page *p =
2019
per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
2020
2021
if (p && p->page)
2022
__free_page(p->page);
2023
}
2024
2025
free_percpu(trans_pcie->txqs.tso_hdr_page);
2026
}
2027
#endif
2028
2029
iwl_trans_free(trans);
2030
}
2031
2032
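/*
 * Helper for the Intel "product reset" ACPI _DSM: pack the cmd/value pair
 * into an ACPI buffer argument and evaluate the DSM identified by the GUID
 * below. Without CONFIG_ACPI this reports -EOPNOTSUPP.
 */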
static union acpi_object *
2033
iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value)
2034
{
2035
#ifdef CONFIG_ACPI
2036
struct iwl_dsm_internal_product_reset_cmd pldr_arg = {
2037
.cmd = cmd,
2038
.value = value,
2039
};
2040
union acpi_object arg = {
2041
.buffer.type = ACPI_TYPE_BUFFER,
2042
.buffer.length = sizeof(pldr_arg),
2043
.buffer.pointer = (void *)&pldr_arg,
2044
};
2045
static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
2046
0x81, 0x4F, 0x75, 0xE4,
2047
0xDD, 0x26, 0xB5, 0xFD);
2048
2049
if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV,
2050
DSM_INTERNAL_FUNC_PRODUCT_RESET))
2051
return ERR_PTR(-ENODEV);
2052
2053
return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV,
2054
DSM_INTERNAL_FUNC_PRODUCT_RESET,
2055
&arg, &dsm_guid);
2056
#else
2057
return ERR_PTR(-EOPNOTSUPP);
2058
#endif
2059
}
2060
2061
void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)
2062
{
2063
union acpi_object *res;
2064
2065
res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
2066
DSM_INTERNAL_PLDR_CMD_GET_MODE,
2067
0);
2068
if (IS_ERR(res))
2069
return;
2070
2071
if (res->type != ACPI_TYPE_INTEGER)
2072
IWL_ERR_DEV(&pdev->dev,
2073
"unexpected return type from product reset DSM\n");
2074
else
2075
IWL_DEBUG_DEV_POWER(&pdev->dev,
2076
"product reset mode is 0x%llx\n",
2077
res->integer.value);
2078
2079
ACPI_FREE(res);
2080
}
2081
2082
static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable,
2083
bool integrated)
2084
{
2085
union acpi_object *res;
2086
u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0;
2087
2088
if (!integrated)
2089
mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |
2090
DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;
2091
2092
res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
2093
DSM_INTERNAL_PLDR_CMD_SET_MODE,
2094
mode);
2095
if (IS_ERR(res)) {
2096
if (enable)
2097
IWL_ERR_DEV(&pdev->dev,
2098
"ACPI _DSM not available (%d), cannot do product reset\n",
2099
(int)PTR_ERR(res));
2100
return;
2101
}
2102
2103
ACPI_FREE(res);
2104
IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n",
2105
enable ? "En" : "Dis");
2106
iwl_trans_pcie_check_product_reset_mode(pdev);
2107
}
2108
2109
void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)
2110
{
2111
union acpi_object *res;
2112
2113
res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
2114
DSM_INTERNAL_PLDR_CMD_GET_STATUS,
2115
0);
2116
if (IS_ERR(res))
2117
return;
2118
2119
if (res->type != ACPI_TYPE_INTEGER)
2120
IWL_ERR_DEV(&pdev->dev,
2121
"unexpected return type from product reset DSM\n");
2122
else
2123
IWL_DEBUG_DEV_POWER(&pdev->dev,
2124
"product reset status is 0x%llx\n",
2125
res->integer.value);
2126
2127
ACPI_FREE(res);
2128
}
2129
2130
static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)
2131
{
2132
#ifdef CONFIG_ACPI
2133
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
2134
union acpi_object *p, *ref;
2135
acpi_status status;
2136
int ret = -EINVAL;
2137
2138
status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),
2139
"_PRR", NULL, &buffer);
2140
if (ACPI_FAILURE(status)) {
2141
IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");
2142
goto out;
2143
}
2144
p = buffer.pointer;
2145
2146
if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {
2147
pci_err(pdev, "Bad _PRR return type\n");
2148
goto out;
2149
}
2150
2151
ref = &p->package.elements[0];
2152
if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
2153
pci_err(pdev, "_PRR wasn't a reference\n");
2154
goto out;
2155
}
2156
2157
status = acpi_evaluate_object(ref->reference.handle,
2158
"_RST", NULL, NULL);
2159
if (ACPI_FAILURE(status)) {
2160
pci_err(pdev,
2161
"Failed to call _RST on object returned by _PRR (%d)\n",
2162
status);
2163
goto out;
2164
}
2165
ret = 0;
2166
out:
2167
kfree(buffer.pointer);
2168
if (!ret) {
2169
IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n");
2170
return;
2171
}
2172
IWL_DEBUG_DEV_POWER(&pdev->dev,
2173
"No BIOS support, using pci_reset_function()\n");
2174
#endif
2175
pci_reset_function(pdev);
2176
}
2177
2178
struct iwl_trans_pcie_removal {
2179
struct pci_dev *pdev;
2180
struct work_struct work;
2181
enum iwl_reset_mode mode;
2182
bool integrated;
2183
};
2184
2185
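/*
 * Worker that carries out the removal/reset scheduled by
 * iwl_trans_pcie_reset(): emit an INACCESSIBLE uevent, optionally remove the
 * companion Bluetooth function for a product reset, perform the ACPI or
 * function-level reset, remove the PCI device and, for rescan modes, rescan
 * the (parent) bus.
 */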
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
2186
{
2187
struct iwl_trans_pcie_removal *removal =
2188
container_of(wk, struct iwl_trans_pcie_removal, work);
2189
struct pci_dev *pdev = removal->pdev;
2190
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
2191
struct pci_bus *bus;
2192
2193
pci_lock_rescan_remove();
2194
2195
bus = pdev->bus;
2196
/* in this case, something else already removed the device */
2197
if (!bus)
2198
goto out;
2199
2200
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
2201
2202
if (removal->mode == IWL_RESET_MODE_PROD_RESET) {
2203
struct pci_dev *bt = NULL;
2204
2205
if (!removal->integrated) {
2206
/* discrete devices have WiFi/BT at function 0/1 */
2207
int slot = PCI_SLOT(pdev->devfn);
2208
int func = PCI_FUNC(pdev->devfn);
2209
2210
if (func == 0)
2211
bt = pci_get_slot(bus, PCI_DEVFN(slot, 1));
2212
else
2213
pci_info(pdev, "Unexpected function %d\n",
2214
func);
2215
} else {
2216
/* on integrated we have to look up by ID (same bus) */
2217
static const struct pci_device_id bt_device_ids[] = {
2218
#define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) }
2219
BT_DEV(0xA876), /* LNL */
2220
BT_DEV(0xE476), /* PTL-P */
2221
BT_DEV(0xE376), /* PTL-H */
2222
BT_DEV(0xD346), /* NVL-H */
2223
BT_DEV(0x6E74), /* NVL-S */
2224
BT_DEV(0x4D76), /* WCL */
2225
BT_DEV(0xD246), /* RZL-H */
2226
BT_DEV(0x6C46), /* RZL-M */
2227
{}
2228
};
2229
struct pci_dev *tmp = NULL;
2230
2231
for_each_pci_dev(tmp) {
2232
if (tmp->bus != bus)
2233
continue;
2234
2235
if (pci_match_id(bt_device_ids, tmp)) {
2236
bt = tmp;
2237
break;
2238
}
2239
}
2240
}
2241
2242
if (bt) {
2243
pci_info(bt, "Removal by WiFi due to product reset\n");
2244
pci_stop_and_remove_bus_device(bt);
2245
pci_dev_put(bt);
2246
}
2247
}
2248
2249
iwl_trans_pcie_set_product_reset(pdev,
2250
removal->mode ==
2251
IWL_RESET_MODE_PROD_RESET,
2252
removal->integrated);
2253
if (removal->mode >= IWL_RESET_MODE_FUNC_RESET)
2254
iwl_trans_pcie_call_reset(pdev);
2255
2256
pci_stop_and_remove_bus_device(pdev);
2257
pci_dev_put(pdev);
2258
2259
if (removal->mode >= IWL_RESET_MODE_RESCAN) {
2260
#if defined(__linux__)
2261
if (bus->parent)
2262
bus = bus->parent;
2263
#elif defined(__FreeBSD__)
2264
/* XXX-TODO */
2265
#endif
2266
pci_rescan_bus(bus);
2267
}
2268
2269
out:
2270
pci_unlock_rescan_remove();
2271
2272
kfree(removal);
2273
module_put(THIS_MODULE);
2274
}
2275
2276
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
2277
{
2278
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2279
struct iwl_trans_pcie_removal *removal;
2280
char _msg = 0, *msg = &_msg;
2281
2282
if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||
2283
mode == IWL_RESET_MODE_BACKOFF))
2284
return;
2285
2286
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2287
return;
2288
2289
if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
2290
mode = IWL_RESET_MODE_FUNC_RESET;
2291
if (trans_pcie->me_present < 0)
2292
msg = " instead of product reset as ME may be present";
2293
else
2294
msg = " instead of product reset as ME is present";
2295
}
2296
2297
IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg);
2298
2299
iwl_pcie_dump_csr(trans);
2300
2301
/*
2302
* get a module reference to avoid doing this
2303
* while unloading anyway and to avoid
2304
* scheduling a work with code that's being
2305
* removed.
2306
*/
2307
if (!try_module_get(THIS_MODULE)) {
2308
IWL_ERR(trans,
2309
"Module is being unloaded - abort\n");
2310
return;
2311
}
2312
2313
removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2314
if (!removal) {
2315
module_put(THIS_MODULE);
2316
return;
2317
}
2318
/*
2319
* we don't need to clear this flag, because
2320
* the trans will be freed and reallocated.
2321
*/
2322
set_bit(STATUS_TRANS_DEAD, &trans->status);
2323
2324
removal->pdev = to_pci_dev(trans->dev);
2325
removal->mode = mode;
2326
removal->integrated = trans->mac_cfg->integrated;
2327
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2328
pci_dev_get(removal->pdev);
2329
schedule_work(&removal->work);
2330
}
2331
EXPORT_SYMBOL(iwl_trans_pcie_reset);
2332
2333
/*
2334
* This version doesn't disable BHs but rather assumes they're
2335
* already disabled.
2336
*/
2337
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
2338
{
2339
int ret;
2340
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2341
u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
2342
u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2343
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
2344
u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
2345
2346
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2347
return false;
2348
2349
spin_lock(&trans_pcie->reg_lock);
2350
2351
if (trans_pcie->cmd_hold_nic_awake)
2352
goto out;
2353
2354
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
2355
write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
2356
mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2357
poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2358
}
2359
2360
/* this bit wakes up the NIC */
2361
iwl_trans_set_bit(trans, CSR_GP_CNTRL, write);
2362
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
2363
udelay(2);
2364
2365
/*
2366
* These bits say the device is running, and should keep running for
2367
* at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
2368
* but they do not indicate that embedded SRAM is restored yet;
2369
* HW with volatile SRAM must save/restore contents to/from
2370
* host DRAM when sleeping/waking for power-saving.
2371
* Each direction takes approximately 1/4 millisecond; with this
2372
* overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2373
* series of register accesses are expected (e.g. reading Event Log),
2374
* to keep device from sleeping.
2375
*
2376
* CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2377
* SRAM is okay/restored. We don't check that here because this call
2378
* is just for hardware register access; but GP1 MAC_SLEEP
2379
* check is a good idea before accessing the SRAM of HW with
2380
* volatile SRAM (e.g. reading Event Log).
2381
*
2382
* 5000 series and later (including 1000 series) have non-volatile SRAM,
2383
* and do not save/restore SRAM when power cycling.
2384
*/
2385
ret = iwl_poll_bits_mask(trans, CSR_GP_CNTRL, poll, mask, 15000);
2386
if (unlikely(ret)) {
2387
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2388
2389
if (silent) {
2390
spin_unlock(&trans_pcie->reg_lock);
2391
return false;
2392
}
2393
2394
WARN_ONCE(1,
2395
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2396
cntrl);
2397
2398
iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
2399
2400
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
2401
iwl_trans_pcie_reset(trans,
2402
IWL_RESET_MODE_REMOVE_ONLY);
2403
else
2404
iwl_write32(trans, CSR_RESET,
2405
CSR_RESET_REG_FLAG_FORCE_NMI);
2406
2407
spin_unlock(&trans_pcie->reg_lock);
2408
return false;
2409
}
2410
2411
out:
2412
/*
2413
* Fool sparse by faking we release the lock - sparse will
2414
* track nic_access anyway.
2415
*/
2416
__release(&trans_pcie->reg_lock);
2417
return true;
2418
}
2419
2420
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2421
{
2422
bool ret;
2423
2424
local_bh_disable();
2425
ret = __iwl_trans_pcie_grab_nic_access(trans, false);
2426
if (ret) {
2427
/* keep BHs disabled until iwl_trans_pcie_release_nic_access */
2428
return ret;
2429
}
2430
local_bh_enable();
2431
return false;
2432
}
2433
2434
void __releases(nic_access_nobh)
2435
iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2436
{
2437
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2438
2439
lockdep_assert_held(&trans_pcie->reg_lock);
2440
2441
/*
2442
* Fool sparse by faking we acquiring the lock - sparse will
2443
* track nic_access anyway.
2444
*/
2445
__acquire(&trans_pcie->reg_lock);
2446
2447
if (trans_pcie->cmd_hold_nic_awake)
2448
goto out;
2449
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2450
iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
2451
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
2452
else
2453
iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
2454
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2455
/*
2456
* Above we read the CSR_GP_CNTRL register, which will flush
2457
* any previous writes, but we need the write that clears the
2458
* MAC_ACCESS_REQ bit to be performed before any other writes
2459
* scheduled on different CPUs (after we drop reg_lock).
2460
*/
2461
out:
2462
__release(nic_access_nobh);
2463
spin_unlock_bh(&trans_pcie->reg_lock);
2464
}
2465
2466
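/*
 * Read 'dwords' 32-bit words of device memory starting at 'addr' through the
 * indirect HBUS_TARG_MEM window. NIC access is held for at most half a second
 * per chunk, the read aborts after IWL_MAX_HW_ERRS consecutive error values,
 * and the CPU is rescheduled between chunks.
 */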
int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2467
void *buf, int dwords)
2468
{
2469
#define IWL_MAX_HW_ERRS 5
2470
unsigned int num_consec_hw_errors = 0;
2471
int offs = 0;
2472
u32 *vals = buf;
2473
2474
while (offs < dwords) {
2475
/* limit the time we spin here under lock to 1/2s */
2476
unsigned long end = jiffies + HZ / 2;
2477
bool resched = false;
2478
2479
if (iwl_trans_grab_nic_access(trans)) {
2480
iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2481
addr + 4 * offs);
2482
2483
while (offs < dwords) {
2484
vals[offs] = iwl_read32(trans,
2485
HBUS_TARG_MEM_RDAT);
2486
2487
if (iwl_trans_is_hw_error_value(vals[offs]))
2488
num_consec_hw_errors++;
2489
else
2490
num_consec_hw_errors = 0;
2491
2492
if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {
2493
iwl_trans_release_nic_access(trans);
2494
return -EIO;
2495
}
2496
2497
offs++;
2498
2499
if (time_after(jiffies, end)) {
2500
resched = true;
2501
break;
2502
}
2503
}
2504
iwl_trans_release_nic_access(trans);
2505
2506
if (resched)
2507
cond_resched();
2508
} else {
2509
return -EBUSY;
2510
}
2511
}
2512
2513
return 0;
2514
}
2515
2516
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2517
u32 *val)
2518
{
2519
return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2520
ofs, val);
2521
}
2522
2523
#define IWL_FLUSH_WAIT_MS 2000
2524
2525
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2526
struct iwl_trans_rxq_dma_data *data)
2527
{
2528
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2529
2530
if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)
2531
return -EINVAL;
2532
2533
data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2534
data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2535
data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2536
data->fr_bd_wid = 0;
2537
2538
return 0;
2539
}
2540
2541
int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2542
{
2543
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2544
struct iwl_txq *txq;
2545
unsigned long now = jiffies;
2546
bool overflow_tx;
2547
u8 wr_ptr;
2548
2549
/* Make sure the NIC is still alive in the bus */
2550
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2551
return -ENODEV;
2552
2553
if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
2554
return -EINVAL;
2555
2556
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2557
txq = trans_pcie->txqs.txq[txq_idx];
2558
2559
spin_lock_bh(&txq->lock);
2560
overflow_tx = txq->overflow_tx ||
2561
!skb_queue_empty(&txq->overflow_q);
2562
spin_unlock_bh(&txq->lock);
2563
2564
wr_ptr = READ_ONCE(txq->write_ptr);
2565
2566
while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2567
overflow_tx) &&
2568
!time_after(jiffies,
2569
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2570
u8 write_ptr = READ_ONCE(txq->write_ptr);
2571
2572
/*
2573
* If write pointer moved during the wait, warn only
2574
* if the TX came from op mode. In case TX came from
2575
* trans layer (overflow TX) don't warn.
2576
*/
2577
if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2578
"WR pointer moved while flushing %d -> %d\n",
2579
wr_ptr, write_ptr))
2580
return -ETIMEDOUT;
2581
wr_ptr = write_ptr;
2582
2583
usleep_range(1000, 2000);
2584
2585
spin_lock_bh(&txq->lock);
2586
overflow_tx = txq->overflow_tx ||
2587
!skb_queue_empty(&txq->overflow_q);
2588
spin_unlock_bh(&txq->lock);
2589
}
2590
2591
if (txq->read_ptr != txq->write_ptr) {
2592
IWL_ERR(trans,
2593
"fail to flush all tx fifo queues Q %d\n", txq_idx);
2594
iwl_txq_log_scd_error(trans, txq);
2595
return -ETIMEDOUT;
2596
}
2597
2598
IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2599
2600
return 0;
2601
}
2602
2603
int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2604
{
2605
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2606
int cnt;
2607
int ret = 0;
2608
2609
/* waiting for all the tx frames complete might take a while */
2610
for (cnt = 0;
2611
cnt < trans->mac_cfg->base->num_of_queues;
2612
cnt++) {
2613
2614
if (cnt == trans->conf.cmd_queue)
2615
continue;
2616
if (!test_bit(cnt, trans_pcie->txqs.queue_used))
2617
continue;
2618
if (!(BIT(cnt) & txq_bm))
2619
continue;
2620
2621
ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2622
if (ret)
2623
break;
2624
}
2625
2626
return ret;
2627
}
2628
2629
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2630
u32 mask, u32 value)
2631
{
2632
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2633
2634
spin_lock_bh(&trans_pcie->reg_lock);
2635
_iwl_trans_set_bits_mask(trans, reg, mask, value);
2636
spin_unlock_bh(&trans_pcie->reg_lock);
2637
}
2638
2639
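/* Map a CSR register offset to its name for the CSR dump below. */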
static const char *get_csr_string(int cmd)
2640
{
2641
#define IWL_CMD(x) case x: return #x
2642
switch (cmd) {
2643
IWL_CMD(CSR_HW_IF_CONFIG_REG);
2644
IWL_CMD(CSR_INT_COALESCING);
2645
IWL_CMD(CSR_INT);
2646
IWL_CMD(CSR_INT_MASK);
2647
IWL_CMD(CSR_FH_INT_STATUS);
2648
IWL_CMD(CSR_GPIO_IN);
2649
IWL_CMD(CSR_RESET);
2650
IWL_CMD(CSR_GP_CNTRL);
2651
IWL_CMD(CSR_HW_REV);
2652
IWL_CMD(CSR_EEPROM_REG);
2653
IWL_CMD(CSR_EEPROM_GP);
2654
IWL_CMD(CSR_OTP_GP_REG);
2655
IWL_CMD(CSR_GIO_REG);
2656
IWL_CMD(CSR_GP_UCODE_REG);
2657
IWL_CMD(CSR_GP_DRIVER_REG);
2658
IWL_CMD(CSR_UCODE_DRV_GP1);
2659
IWL_CMD(CSR_UCODE_DRV_GP2);
2660
IWL_CMD(CSR_LED_REG);
2661
IWL_CMD(CSR_DRAM_INT_TBL_REG);
2662
IWL_CMD(CSR_GIO_CHICKEN_BITS);
2663
IWL_CMD(CSR_ANA_PLL_CFG);
2664
IWL_CMD(CSR_HW_REV_WA_REG);
2665
IWL_CMD(CSR_MONITOR_STATUS_REG);
2666
IWL_CMD(CSR_DBG_HPET_MEM_REG);
2667
default:
2668
return "UNKNOWN";
2669
}
2670
#undef IWL_CMD
2671
}
2672
2673
void iwl_pcie_dump_csr(struct iwl_trans *trans)
2674
{
2675
int i;
2676
static const u32 csr_tbl[] = {
2677
CSR_HW_IF_CONFIG_REG,
2678
CSR_INT_COALESCING,
2679
CSR_INT,
2680
CSR_INT_MASK,
2681
CSR_FH_INT_STATUS,
2682
CSR_GPIO_IN,
2683
CSR_RESET,
2684
CSR_GP_CNTRL,
2685
CSR_HW_REV,
2686
CSR_EEPROM_REG,
2687
CSR_EEPROM_GP,
2688
CSR_OTP_GP_REG,
2689
CSR_GIO_REG,
2690
CSR_GP_UCODE_REG,
2691
CSR_GP_DRIVER_REG,
2692
CSR_UCODE_DRV_GP1,
2693
CSR_UCODE_DRV_GP2,
2694
CSR_LED_REG,
2695
CSR_DRAM_INT_TBL_REG,
2696
CSR_GIO_CHICKEN_BITS,
2697
CSR_ANA_PLL_CFG,
2698
CSR_MONITOR_STATUS_REG,
2699
CSR_HW_REV_WA_REG,
2700
CSR_DBG_HPET_MEM_REG
2701
};
2702
IWL_ERR(trans, "CSR values:\n");
2703
IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2704
"CSR_INT_PERIODIC_REG)\n");
2705
for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2706
IWL_ERR(trans, " %25s: 0X%08x\n",
2707
get_csr_string(csr_tbl[i]),
2708
iwl_read32(trans, csr_tbl[i]));
2709
}
2710
}
2711
2712
#ifdef CONFIG_IWLWIFI_DEBUGFS
2713
/* create and remove of files */
2714
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
2715
debugfs_create_file(#name, mode, parent, trans, \
2716
&iwl_dbgfs_##name##_ops); \
2717
} while (0)
2718
2719
/* file operation */
2720
#define DEBUGFS_READ_FILE_OPS(name) \
2721
static const struct file_operations iwl_dbgfs_##name##_ops = { \
2722
.read = iwl_dbgfs_##name##_read, \
2723
.open = simple_open, \
2724
.llseek = generic_file_llseek, \
2725
};
2726
2727
#define DEBUGFS_WRITE_FILE_OPS(name) \
2728
static const struct file_operations iwl_dbgfs_##name##_ops = { \
2729
.write = iwl_dbgfs_##name##_write, \
2730
.open = simple_open, \
2731
.llseek = generic_file_llseek, \
2732
};
2733
2734
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
2735
static const struct file_operations iwl_dbgfs_##name##_ops = { \
2736
.write = iwl_dbgfs_##name##_write, \
2737
.read = iwl_dbgfs_##name##_read, \
2738
.open = simple_open, \
2739
.llseek = generic_file_llseek, \
2740
};
2741
2742
struct iwl_dbgfs_tx_queue_priv {
2743
struct iwl_trans *trans;
2744
};
2745
2746
struct iwl_dbgfs_tx_queue_state {
2747
loff_t pos;
2748
};
2749
2750
static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2751
{
2752
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2753
struct iwl_dbgfs_tx_queue_state *state;
2754
2755
if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
2756
return NULL;
2757
2758
state = kmalloc(sizeof(*state), GFP_KERNEL);
2759
if (!state)
2760
return NULL;
2761
state->pos = *pos;
2762
return state;
2763
}
2764
2765
static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2766
void *v, loff_t *pos)
2767
{
2768
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2769
struct iwl_dbgfs_tx_queue_state *state = v;
2770
2771
*pos = ++state->pos;
2772
2773
if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
2774
return NULL;
2775
2776
return state;
2777
}
2778
2779
static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2780
{
2781
kfree(v);
2782
}
2783
2784
static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2785
{
2786
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2787
struct iwl_dbgfs_tx_queue_state *state = v;
2788
struct iwl_trans *trans = priv->trans;
2789
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2790
struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
2791
2792
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2793
(unsigned int)state->pos,
2794
!!test_bit(state->pos, trans_pcie->txqs.queue_used),
2795
!!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
2796
if (txq)
2797
seq_printf(seq,
2798
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2799
txq->read_ptr, txq->write_ptr,
2800
txq->need_update, txq->frozen,
2801
txq->n_window, txq->ampdu);
2802
else
2803
seq_puts(seq, "(unallocated)");
2804
2805
if (state->pos == trans->conf.cmd_queue)
2806
seq_puts(seq, " (HCMD)");
2807
seq_puts(seq, "\n");
2808
2809
return 0;
2810
}
2811
2812
static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2813
.start = iwl_dbgfs_tx_queue_seq_start,
2814
.next = iwl_dbgfs_tx_queue_seq_next,
2815
.stop = iwl_dbgfs_tx_queue_seq_stop,
2816
.show = iwl_dbgfs_tx_queue_seq_show,
2817
};
2818
2819
static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2820
{
2821
struct iwl_dbgfs_tx_queue_priv *priv;
2822
2823
priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2824
sizeof(*priv));
2825
2826
if (!priv)
2827
return -ENOMEM;
2828
2829
priv->trans = inode->i_private;
2830
return 0;
2831
}
2832
2833
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2834
char __user *user_buf,
2835
size_t count, loff_t *ppos)
2836
{
2837
struct iwl_trans *trans = file->private_data;
2838
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2839
char *buf;
2840
int pos = 0, i, ret;
2841
size_t bufsz;
2842
2843
bufsz = sizeof(char) * 121 * trans->info.num_rxqs;
2844
2845
if (!trans_pcie->rxq)
2846
return -EAGAIN;
2847
2848
buf = kzalloc(bufsz, GFP_KERNEL);
2849
if (!buf)
2850
return -ENOMEM;
2851
2852
for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {
2853
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2854
2855
spin_lock_bh(&rxq->lock);
2856
2857
pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2858
i);
2859
pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2860
rxq->read);
2861
pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2862
rxq->write);
2863
pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2864
rxq->write_actual);
2865
pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2866
rxq->need_update);
2867
pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2868
rxq->free_count);
2869
if (rxq->rb_stts) {
2870
u32 r = iwl_get_closed_rb_stts(trans, rxq);
2871
pos += scnprintf(buf + pos, bufsz - pos,
2872
"\tclosed_rb_num: %u\n", r);
2873
} else {
2874
pos += scnprintf(buf + pos, bufsz - pos,
2875
"\tclosed_rb_num: Not Allocated\n");
2876
}
2877
spin_unlock_bh(&rxq->lock);
2878
}
2879
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2880
kfree(buf);
2881
2882
return ret;
2883
}
2884
2885
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2886
char __user *user_buf,
2887
size_t count, loff_t *ppos)
2888
{
2889
struct iwl_trans *trans = file->private_data;
2890
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2891
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2892
2893
int pos = 0;
2894
char *buf;
2895
int bufsz = 24 * 64; /* 24 items * 64 char per item */
2896
ssize_t ret;
2897
2898
buf = kzalloc(bufsz, GFP_KERNEL);
2899
if (!buf)
2900
return -ENOMEM;
2901
2902
pos += scnprintf(buf + pos, bufsz - pos,
2903
"Interrupt Statistics Report:\n");
2904
2905
pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2906
isr_stats->hw);
2907
pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2908
isr_stats->sw);
2909
if (isr_stats->sw || isr_stats->hw) {
2910
pos += scnprintf(buf + pos, bufsz - pos,
2911
"\tLast Restarting Code: 0x%X\n",
2912
isr_stats->err_code);
2913
}
2914
#ifdef CONFIG_IWLWIFI_DEBUG
2915
pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2916
isr_stats->sch);
2917
pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2918
isr_stats->alive);
2919
#endif
2920
pos += scnprintf(buf + pos, bufsz - pos,
2921
"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2922
2923
pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2924
isr_stats->ctkill);
2925
2926
pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2927
isr_stats->wakeup);
2928
2929
pos += scnprintf(buf + pos, bufsz - pos,
2930
"Rx command responses:\t\t %u\n", isr_stats->rx);
2931
2932
pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2933
isr_stats->tx);
2934
2935
pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2936
isr_stats->unhandled);
2937
2938
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2939
kfree(buf);
2940
return ret;
2941
}
2942
2943
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2944
const char __user *user_buf,
2945
size_t count, loff_t *ppos)
2946
{
2947
struct iwl_trans *trans = file->private_data;
2948
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2949
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2950
u32 reset_flag;
2951
int ret;
2952
2953
ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2954
if (ret)
2955
return ret;
2956
if (reset_flag == 0)
2957
memset(isr_stats, 0, sizeof(*isr_stats));
2958
2959
return count;
2960
}
2961
2962
static ssize_t iwl_dbgfs_csr_write(struct file *file,
2963
const char __user *user_buf,
2964
size_t count, loff_t *ppos)
2965
{
2966
struct iwl_trans *trans = file->private_data;
2967
2968
iwl_pcie_dump_csr(trans);
2969
2970
return count;
2971
}
2972
2973
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2974
char __user *user_buf,
2975
size_t count, loff_t *ppos)
2976
{
2977
struct iwl_trans *trans = file->private_data;
2978
char *buf = NULL;
2979
ssize_t ret;
2980
2981
ret = iwl_dump_fh(trans, &buf);
2982
if (ret < 0)
2983
return ret;
2984
if (!buf)
2985
return -EINVAL;
2986
ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2987
kfree(buf);
2988
return ret;
2989
}
2990
2991
static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2992
char __user *user_buf,
2993
size_t count, loff_t *ppos)
2994
{
2995
struct iwl_trans *trans = file->private_data;
2996
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2997
char buf[100];
2998
int pos;
2999
3000
pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
3001
trans_pcie->debug_rfkill,
3002
!(iwl_read32(trans, CSR_GP_CNTRL) &
3003
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
3004
3005
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
3006
}
3007
3008
static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
3009
const char __user *user_buf,
3010
size_t count, loff_t *ppos)
3011
{
3012
struct iwl_trans *trans = file->private_data;
3013
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3014
bool new_value;
3015
int ret;
3016
3017
ret = kstrtobool_from_user(user_buf, count, &new_value);
3018
if (ret)
3019
return ret;
3020
if (new_value == trans_pcie->debug_rfkill)
3021
return count;
3022
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
3023
trans_pcie->debug_rfkill, new_value);
3024
trans_pcie->debug_rfkill = new_value;
3025
iwl_pcie_handle_rfkill_irq(trans, false);
3026
3027
return count;
3028
}
3029
3030
static int iwl_dbgfs_monitor_data_open(struct inode *inode,
3031
struct file *file)
3032
{
3033
struct iwl_trans *trans = inode->i_private;
3034
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3035
3036
if (!trans->dbg.dest_tlv ||
3037
trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
3038
IWL_ERR(trans, "Debug destination is not set to DRAM\n");
3039
return -ENOENT;
3040
}
3041
3042
if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
3043
return -EBUSY;
3044
3045
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
3046
return simple_open(inode, file);
3047
}
3048
3049
static int iwl_dbgfs_monitor_data_release(struct inode *inode,
3050
struct file *file)
3051
{
3052
struct iwl_trans_pcie *trans_pcie =
3053
IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
3054
3055
if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
3056
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3057
return 0;
3058
}
3059
3060
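/*
 * Copy one chunk of monitor data to the user buffer. The remaining space is
 * rounded down to a multiple of u32, *size is clamped to what fits and then
 * reduced by whatever copy_to_user() could not copy; the return value tells
 * the caller whether the user buffer is now full.
 */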
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
3061
void *buf, ssize_t *size,
3062
ssize_t *bytes_copied)
3063
{
3064
ssize_t buf_size_left = count - *bytes_copied;
3065
3066
buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
3067
if (*size > buf_size_left)
3068
*size = buf_size_left;
3069
3070
*size -= copy_to_user(user_buf, buf, *size);
3071
*bytes_copied += *size;
3072
3073
if (buf_size_left == *size)
3074
return true;
3075
return false;
3076
}
3077
3078
static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
3079
char __user *user_buf,
3080
size_t count, loff_t *ppos)
3081
{
3082
struct iwl_trans *trans = file->private_data;
3083
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3084
u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
3085
struct cont_rec *data = &trans_pcie->fw_mon_data;
3086
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
3087
ssize_t size, bytes_copied = 0;
3088
bool b_full;
3089
3090
if (trans->dbg.dest_tlv) {
3091
write_ptr_addr =
3092
le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3093
wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3094
} else {
3095
write_ptr_addr = MON_BUFF_WRPTR;
3096
wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
3097
}
3098
3099
if (unlikely(!trans->dbg.rec_on))
3100
return 0;
3101
3102
mutex_lock(&data->mutex);
3103
if (data->state ==
3104
IWL_FW_MON_DBGFS_STATE_DISABLED) {
3105
mutex_unlock(&data->mutex);
3106
return 0;
3107
}
3108
3109
/* write_ptr position in bytes rather then DW */
3110
write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
3111
wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
3112
3113
if (data->prev_wrap_cnt == wrap_cnt) {
3114
size = write_ptr - data->prev_wr_ptr;
3115
curr_buf = cpu_addr + data->prev_wr_ptr;
3116
b_full = iwl_write_to_user_buf(user_buf, count,
3117
curr_buf, &size,
3118
&bytes_copied);
3119
data->prev_wr_ptr += size;
3120
3121
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
3122
write_ptr < data->prev_wr_ptr) {
3123
size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
3124
curr_buf = cpu_addr + data->prev_wr_ptr;
3125
b_full = iwl_write_to_user_buf(user_buf, count,
3126
curr_buf, &size,
3127
&bytes_copied);
3128
data->prev_wr_ptr += size;
3129
3130
if (!b_full) {
3131
size = write_ptr;
3132
b_full = iwl_write_to_user_buf(user_buf, count,
3133
cpu_addr, &size,
3134
&bytes_copied);
3135
data->prev_wr_ptr = size;
3136
data->prev_wrap_cnt++;
3137
}
3138
} else {
3139
if (data->prev_wrap_cnt == wrap_cnt - 1 &&
3140
write_ptr > data->prev_wr_ptr)
3141
IWL_WARN(trans,
3142
"write pointer passed previous write pointer, start copying from the beginning\n");
3143
else if (!unlikely(data->prev_wrap_cnt == 0 &&
3144
data->prev_wr_ptr == 0))
3145
IWL_WARN(trans,
3146
"monitor data is out of sync, start copying from the beginning\n");
3147
3148
size = write_ptr;
3149
b_full = iwl_write_to_user_buf(user_buf, count,
3150
cpu_addr, &size,
3151
&bytes_copied);
3152
data->prev_wr_ptr = size;
3153
data->prev_wrap_cnt = wrap_cnt;
3154
}
3155
3156
mutex_unlock(&data->mutex);
3157
3158
return bytes_copied;
3159
}
3160
3161
static ssize_t iwl_dbgfs_rf_read(struct file *file,
3162
char __user *user_buf,
3163
size_t count, loff_t *ppos)
3164
{
3165
struct iwl_trans *trans = file->private_data;
3166
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3167
3168
if (!trans_pcie->rf_name[0])
3169
return -ENODEV;
3170
3171
return simple_read_from_buffer(user_buf, count, ppos,
3172
trans_pcie->rf_name,
3173
strlen(trans_pcie->rf_name));
3174
}
3175
3176
static ssize_t iwl_dbgfs_reset_write(struct file *file,
3177
const char __user *user_buf,
3178
size_t count, loff_t *ppos)
3179
{
3180
struct iwl_trans *trans = file->private_data;
3181
static const char * const modes[] = {
3182
[IWL_RESET_MODE_SW_RESET] = "sw",
3183
[IWL_RESET_MODE_REPROBE] = "reprobe",
3184
[IWL_RESET_MODE_TOP_RESET] = "top",
3185
[IWL_RESET_MODE_REMOVE_ONLY] = "remove",
3186
[IWL_RESET_MODE_RESCAN] = "rescan",
3187
[IWL_RESET_MODE_FUNC_RESET] = "function",
3188
[IWL_RESET_MODE_PROD_RESET] = "product",
3189
};
3190
char buf[10] = {};
3191
int mode;
3192
3193
if (count > sizeof(buf) - 1)
3194
return -EINVAL;
3195
3196
if (copy_from_user(buf, user_buf, count))
3197
return -EFAULT;
3198
3199
mode = sysfs_match_string(modes, buf);
3200
if (mode < 0)
3201
return mode;
3202
3203
if (mode < IWL_RESET_MODE_REMOVE_ONLY) {
3204
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
3205
return -EINVAL;
3206
if (mode == IWL_RESET_MODE_TOP_RESET) {
3207
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
3208
return -EINVAL;
3209
trans->request_top_reset = 1;
3210
}
3211
iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS);
3212
iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS);
3213
return count;
3214
}
3215
3216
iwl_trans_pcie_reset(trans, mode);
3217
3218
return count;
3219
}
3220
3221
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
3222
DEBUGFS_READ_FILE_OPS(fh_reg);
3223
DEBUGFS_READ_FILE_OPS(rx_queue);
3224
DEBUGFS_WRITE_FILE_OPS(csr);
3225
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
3226
DEBUGFS_READ_FILE_OPS(rf);
3227
DEBUGFS_WRITE_FILE_OPS(reset);
3228
3229
static const struct file_operations iwl_dbgfs_tx_queue_ops = {
3230
.owner = THIS_MODULE,
3231
.open = iwl_dbgfs_tx_queue_open,
3232
.read = seq_read,
3233
.llseek = seq_lseek,
3234
.release = seq_release_private,
3235
};
3236
3237
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
3238
.read = iwl_dbgfs_monitor_data_read,
3239
.open = iwl_dbgfs_monitor_data_open,
3240
.release = iwl_dbgfs_monitor_data_release,
3241
};
3242
3243
/* Create the debugfs files and directories */
3244
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
3245
{
3246
struct dentry *dir = trans->dbgfs_dir;
3247
3248
DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
3249
DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
3250
DEBUGFS_ADD_FILE(interrupt, dir, 0600);
3251
DEBUGFS_ADD_FILE(csr, dir, 0200);
3252
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
3253
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
3254
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
3255
DEBUGFS_ADD_FILE(rf, dir, 0400);
3256
DEBUGFS_ADD_FILE(reset, dir, 0200);
3257
}
3258
3259
void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
3260
{
3261
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3262
struct cont_rec *data = &trans_pcie->fw_mon_data;
3263
3264
mutex_lock(&data->mutex);
3265
data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
3266
mutex_unlock(&data->mutex);
3267
}
3268
#endif /*CONFIG_IWLWIFI_DEBUGFS */
3269
3270
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
3271
{
3272
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3273
u32 cmdlen = 0;
3274
int i;
3275
3276
for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
3277
cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
3278
3279
return cmdlen;
3280
}
3281
3282
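/*
 * Dump the receive buffers of RX queue 0 (the only queue on pre-9000 devices)
 * into the error-dump blob: walk from the queue's read pointer to the closed
 * RB index, sync each page for CPU access, copy it out, and return the number
 * of bytes added.
 */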
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
3283
struct iwl_fw_error_dump_data **data,
3284
int allocated_rb_nums)
3285
{
3286
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3287
int max_len = trans_pcie->rx_buf_bytes;
3288
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
3289
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3290
u32 i, r, j, rb_len = 0;
3291
3292
spin_lock_bh(&rxq->lock);
3293
3294
r = iwl_get_closed_rb_stts(trans, rxq);
3295
3296
for (i = rxq->read, j = 0;
3297
i != r && j < allocated_rb_nums;
3298
i = (i + 1) & RX_QUEUE_MASK, j++) {
3299
struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
3300
struct iwl_fw_error_dump_rb *rb;
3301
3302
dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
3303
max_len, DMA_FROM_DEVICE);
3304
3305
rb_len += sizeof(**data) + sizeof(*rb) + max_len;
3306
3307
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
3308
(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
3309
rb = (void *)(*data)->data;
3310
rb->index = cpu_to_le32(i);
3311
memcpy(rb->data, page_address(rxb->page), max_len);
3312
3313
*data = iwl_fw_error_next_data(*data);
3314
}
3315
3316
spin_unlock_bh(&rxq->lock);
3317
3318
return rb_len;
3319
}
3320
#define IWL_CSR_TO_DUMP (0x250)
3321
3322
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
3323
struct iwl_fw_error_dump_data **data)
3324
{
3325
u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
3326
__le32 *val;
3327
int i;
3328
3329
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
3330
(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
3331
val = (void *)(*data)->data;
3332
3333
for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
3334
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3335
3336
*data = iwl_fw_error_next_data(*data);
3337
3338
return csr_len;
3339
}
3340
3341
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
3342
struct iwl_fw_error_dump_data **data)
3343
{
3344
u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
3345
__le32 *val;
3346
int i;
3347
3348
if (!iwl_trans_grab_nic_access(trans))
3349
return 0;
3350
3351
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
3352
(*data)->len = cpu_to_le32(fh_regs_len);
3353
val = (void *)(*data)->data;
3354
3355
if (!trans->mac_cfg->gen2)
3356
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
3357
i += sizeof(u32))
3358
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3359
else
3360
for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
3361
i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
3362
i += sizeof(u32))
3363
*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
3364
i));
3365
3366
iwl_trans_release_nic_access(trans);
3367
3368
*data = iwl_fw_error_next_data(*data);
3369
3370
return sizeof(**data) + fh_regs_len;
3371
}
3372
3373
static u32
3374
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3375
struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3376
u32 monitor_len)
3377
{
3378
u32 buf_size_in_dwords = (monitor_len >> 2);
3379
u32 *buffer = (u32 *)fw_mon_data->data;
3380
u32 i;
3381
3382
if (!iwl_trans_grab_nic_access(trans))
3383
return 0;
3384
3385
iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3386
for (i = 0; i < buf_size_in_dwords; i++)
3387
buffer[i] = iwl_read_umac_prph_no_grab(trans,
3388
MON_DMARB_RD_DATA_ADDR);
3389
iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3390
3391
iwl_trans_release_nic_access(trans);
3392
3393
return monitor_len;
3394
}
3395
3396
static void
3397
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3398
struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3399
{
3400
u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3401
3402
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3403
base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3404
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3405
write_ptr = DBGC_CUR_DBGBUF_STATUS;
3406
wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3407
} else if (trans->dbg.dest_tlv) {
3408
write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3409
wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3410
base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3411
} else {
3412
base = MON_BUFF_BASE_ADDR;
3413
write_ptr = MON_BUFF_WRPTR;
3414
wrap_cnt = MON_BUFF_CYCLE_CNT;
3415
}
3416
3417
write_ptr_val = iwl_read_prph(trans, write_ptr);
3418
fw_mon_data->fw_mon_cycle_cnt =
3419
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3420
fw_mon_data->fw_mon_base_ptr =
3421
cpu_to_le32(iwl_read_prph(trans, base));
3422
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3423
fw_mon_data->fw_mon_base_high_ptr =
3424
cpu_to_le32(iwl_read_prph(trans, base_high));
3425
write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3426
/* convert wrtPtr to DWs, to align with all HWs */
3427
write_ptr_val >>= 2;
3428
}
3429
fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3430
}
3431
3432
static u32
3433
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3434
struct iwl_fw_error_dump_data **data,
3435
u32 monitor_len)
3436
{
3437
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3438
u32 len = 0;
3439
3440
if (trans->dbg.dest_tlv ||
3441
(fw_mon->size &&
3442
(trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3443
trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3444
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3445
3446
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3447
fw_mon_data = (void *)(*data)->data;
3448
3449
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3450
3451
len += sizeof(**data) + sizeof(*fw_mon_data);
3452
if (fw_mon->size) {
3453
memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3454
monitor_len = fw_mon->size;
3455
} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3456
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3457
/*
3458
* Update pointers to reflect actual values after
3459
* shifting
3460
*/
3461
if (trans->dbg.dest_tlv->version) {
3462
base = (iwl_read_prph(trans, base) &
3463
IWL_LDBG_M2S_BUF_BA_MSK) <<
3464
trans->dbg.dest_tlv->base_shift;
3465
base *= IWL_M2S_UNIT_SIZE;
3466
base += trans->mac_cfg->base->smem_offset;
3467
} else {
3468
base = iwl_read_prph(trans, base) <<
3469
trans->dbg.dest_tlv->base_shift;
3470
}
3471
3472
iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data,
3473
monitor_len / sizeof(u32));
3474
} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3475
monitor_len =
3476
iwl_trans_pci_dump_marbh_monitor(trans,
3477
fw_mon_data,
3478
monitor_len);
3479
} else {
3480
/* Didn't match anything - output no monitor data */
3481
monitor_len = 0;
3482
}
3483
3484
len += monitor_len;
3485
(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3486
}
3487
3488
return len;
3489
}
3490
3491
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3492
{
3493
if (trans->dbg.fw_mon.size) {
3494
*len += sizeof(struct iwl_fw_error_dump_data) +
3495
sizeof(struct iwl_fw_error_dump_fw_mon) +
3496
trans->dbg.fw_mon.size;
3497
return trans->dbg.fw_mon.size;
3498
} else if (trans->dbg.dest_tlv) {
3499
u32 base, end, cfg_reg, monitor_len;
3500
3501
if (trans->dbg.dest_tlv->version == 1) {
3502
cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3503
cfg_reg = iwl_read_prph(trans, cfg_reg);
3504
base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3505
trans->dbg.dest_tlv->base_shift;
3506
base *= IWL_M2S_UNIT_SIZE;
3507
base += trans->mac_cfg->base->smem_offset;
3508
3509
monitor_len =
3510
(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3511
trans->dbg.dest_tlv->end_shift;
3512
monitor_len *= IWL_M2S_UNIT_SIZE;
3513
} else {
3514
base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3515
end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3516
3517
base = iwl_read_prph(trans, base) <<
3518
trans->dbg.dest_tlv->base_shift;
3519
end = iwl_read_prph(trans, end) <<
3520
trans->dbg.dest_tlv->end_shift;
3521
3522
/* Make "end" point to the actual end */
3523
if (trans->mac_cfg->device_family >=
3524
IWL_DEVICE_FAMILY_8000 ||
3525
trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3526
end += (1 << trans->dbg.dest_tlv->end_shift);
3527
monitor_len = end - base;
3528
}
3529
*len += sizeof(struct iwl_fw_error_dump_data) +
3530
sizeof(struct iwl_fw_error_dump_fw_mon) +
3531
monitor_len;
3532
return monitor_len;
3533
}
3534
return 0;
3535
}
3536
3537
struct iwl_trans_dump_data *
3538
iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
3539
const struct iwl_dump_sanitize_ops *sanitize_ops,
3540
void *sanitize_ctx)
3541
{
3542
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3543
struct iwl_fw_error_dump_data *data;
3544
struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
3545
struct iwl_fw_error_dump_txcmd *txcmd;
3546
struct iwl_trans_dump_data *dump_data;
3547
u32 len, num_rbs = 0, monitor_len = 0;
3548
int i, ptr;
3549
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3550
!trans->mac_cfg->mq_rx_supported &&
3551
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3552
3553
if (!dump_mask)
3554
return NULL;
3555
3556
/* transport dump header */
3557
len = sizeof(*dump_data);
3558
3559
/* host commands */
3560
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3561
len += sizeof(*data) +
3562
cmdq->n_window * (sizeof(*txcmd) +
3563
TFD_MAX_PAYLOAD_SIZE);
3564
3565
/* FW monitor */
3566
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3567
monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3568
3569
/* CSR registers */
3570
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3571
len += sizeof(*data) + IWL_CSR_TO_DUMP;
3572
3573
/* FH registers */
3574
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3575
if (trans->mac_cfg->gen2)
3576
len += sizeof(*data) +
3577
(iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3578
iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3579
else
3580
len += sizeof(*data) +
3581
(FH_MEM_UPPER_BOUND -
3582
FH_MEM_LOWER_BOUND);
3583
}
3584
3585
if (dump_rbs) {
3586
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
3587
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3588
/* RBs */
3589
spin_lock_bh(&rxq->lock);
3590
num_rbs = iwl_get_closed_rb_stts(trans, rxq);
3591
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3592
spin_unlock_bh(&rxq->lock);
3593
3594
len += num_rbs * (sizeof(*data) +
3595
sizeof(struct iwl_fw_error_dump_rb) +
3596
(PAGE_SIZE << trans_pcie->rx_page_order));
3597
}
3598
3599
/* Paged memory for gen2 HW */
3600
if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3601
for (i = 0; i < trans->init_dram.paging_cnt; i++)
3602
len += sizeof(*data) +
3603
sizeof(struct iwl_fw_error_dump_paging) +
3604
trans->init_dram.paging[i].size;
3605
3606
dump_data = vzalloc(len);
3607
if (!dump_data)
3608
return NULL;
3609
3610
len = 0;
3611
data = (void *)dump_data->data;
3612
3613
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3614
u16 tfd_size = trans_pcie->txqs.tfd.size;
3615
3616
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3617
txcmd = (void *)data->data;
3618
spin_lock_bh(&cmdq->lock);
3619
ptr = cmdq->write_ptr;
3620
for (i = 0; i < cmdq->n_window; i++) {
3621
u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3622
u8 tfdidx;
3623
u32 caplen, cmdlen;
3624
3625
if (trans->mac_cfg->gen2)
3626
tfdidx = idx;
3627
else
3628
tfdidx = ptr;
3629
3630
cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3631
(u8 *)cmdq->tfds +
3632
tfd_size * tfdidx);
3633
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3634
3635
if (cmdlen) {
3636
len += sizeof(*txcmd) + caplen;
3637
txcmd->cmdlen = cpu_to_le32(cmdlen);
3638
txcmd->caplen = cpu_to_le32(caplen);
3639
memcpy(txcmd->data, cmdq->entries[idx].cmd,
3640
caplen);
3641
if (sanitize_ops && sanitize_ops->frob_hcmd)
3642
sanitize_ops->frob_hcmd(sanitize_ctx,
3643
txcmd->data,
3644
caplen);
3645
txcmd = (void *)((u8 *)txcmd->data + caplen);
3646
}
3647
3648
ptr = iwl_txq_dec_wrap(trans, ptr);
3649
}
3650
spin_unlock_bh(&cmdq->lock);
3651
3652
data->len = cpu_to_le32(len);
3653
len += sizeof(*data);
3654
data = iwl_fw_error_next_data(data);
3655
}
3656
3657
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3658
len += iwl_trans_pcie_dump_csr(trans, &data);
3659
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3660
len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3661
if (dump_rbs)
3662
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3663
3664
/* Paged memory for gen2 HW */
3665
if (trans->mac_cfg->gen2 &&
3666
dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3667
for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3668
struct iwl_fw_error_dump_paging *paging;
3669
u32 page_len = trans->init_dram.paging[i].size;
3670
3671
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3672
data->len = cpu_to_le32(sizeof(*paging) + page_len);
3673
paging = (void *)data->data;
3674
paging->index = cpu_to_le32(i);
3675
memcpy(paging->data,
3676
trans->init_dram.paging[i].block, page_len);
3677
data = iwl_fw_error_next_data(data);
3678
3679
len += sizeof(*data) + sizeof(*paging) + page_len;
3680
}
3681
}
3682
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3683
len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3684
3685
dump_data->len = len;
3686
3687
return dump_data;
3688
}
3689
3690
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_enable_interrupts(trans);
	else
		iwl_disable_interrupts(trans);
}

void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	u32 inta_addr, sw_err_bit;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
		else
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
3716
3717
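/*
 * Compute the size and alignment of the device TX command buffer for the
 * given MAC config: gen1 uses iwl_tx_cmd_v6 with pointer alignment, pre-AX210
 * gen2 uses iwl_tx_cmd_v9 aligned to 64 bytes, and AX210+ uses iwl_tx_cmd
 * aligned to 128 bytes. Room for the command header and the largest possible
 * 802.11 header is added on top, and a gen2 command must not reach or cross
 * its alignment boundary.
 */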
static int iwl_trans_pcie_set_txcmd_info(const struct iwl_mac_cfg *mac_cfg,
3718
unsigned int *txcmd_size,
3719
unsigned int *txcmd_align)
3720
{
3721
if (!mac_cfg->gen2) {
3722
*txcmd_size = sizeof(struct iwl_tx_cmd_v6);
3723
*txcmd_align = sizeof(void *);
3724
} else if (mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
3725
*txcmd_size = sizeof(struct iwl_tx_cmd_v9);
3726
*txcmd_align = 64;
3727
} else {
3728
*txcmd_size = sizeof(struct iwl_tx_cmd);
3729
*txcmd_align = 128;
3730
}
3731
3732
*txcmd_size += sizeof(struct iwl_cmd_header);
3733
*txcmd_size += 36; /* biggest possible 802.11 header */
3734
3735
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
3736
if (WARN_ON((mac_cfg->gen2 && *txcmd_size >= *txcmd_align)))
3737
return -EINVAL;
3738
3739
return 0;
3740
}
3741
3742
static struct iwl_trans *
iwl_trans_pcie_alloc(struct pci_dev *pdev,
		     const struct iwl_mac_cfg *mac_cfg,
		     struct iwl_trans_info *info, u8 __iomem *hw_base)
{
	struct iwl_trans_pcie *trans_pcie, **priv;
	unsigned int txcmd_size, txcmd_align;
	struct iwl_trans *trans;
	unsigned int bc_tbl_n_entries;
	int ret, addr_size;

	ret = iwl_trans_pcie_set_txcmd_info(mac_cfg, &txcmd_size,
					    &txcmd_align);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
				mac_cfg, txcmd_size, txcmd_align);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->hw_base = hw_base;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

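	/* TFD layout: gen2 devices use the extended TFH descriptors, gen1 the legacy ones */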
	if (trans->mac_cfg->gen2) {
		trans_pcie->txqs.tfd.addr_size = 64;
		trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
	} else {
		trans_pcie->txqs.tfd.addr_size = 36;
		trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
	}

	trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);

	info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);

#ifdef CONFIG_INET
	trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->txqs.tso_hdr_page) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
#endif

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
	else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
	else
		bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;

	trans_pcie->txqs.bc_tbl_size =
		sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
	/*
	 * For gen2 devices, we use a single allocation for each byte-count
	 * table, but they're pretty small (1k) so use a DMA pool that we
	 * allocate here.
	 */
	if (trans->mac_cfg->gen2) {
		trans_pcie->txqs.bc_pool =
			dmam_pool_create("iwlwifi:bc", trans->dev,
					 trans_pcie->txqs.bc_tbl_size,
					 256, 0);
		if (!trans_pcie->txqs.bc_pool) {
			ret = -ENOMEM;
			goto out_free_tso;
		}
	}

	/* Some things must not change even if the config does */
	WARN_ON(trans_pcie->txqs.tfd.addr_size !=
		(trans->mac_cfg->gen2 ? 64 : 36));

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 */
	trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
	if (!trans_pcie->napi_dev) {
		ret = -ENOMEM;
		goto out_free_tso;
	}
	/* The private struct in netdev is a pointer to struct iwl_trans_pcie */
	priv = netdev_priv(trans_pcie->napi_dev);
	*priv = trans_pcie;

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
	init_waitqueue_head(&trans_pcie->imr_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_ndev;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!mac_cfg->base->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	addr_size = trans_pcie->txqs.tfd.addr_size;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		info->hw_rev_step = info->hw_rev & 0xF;
	else
		info->hw_rev_step = (info->hw_rev & 0xC) >> 2;

	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
	if (ret)
		goto out_no_pci;

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_ndev:
	free_netdev(trans_pcie->napi_dev);
out_free_tso:
#ifdef CONFIG_INET
	free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
#endif
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}

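/*
 * Program the IMR service DMA channel: halt the UMAC, point channel 0 at
 * the SRAM destination and the DRAM source, set the byte count and kick
 * off the DRAM-to-SRAM transfer.
 */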
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	iwl_write_prph(trans, IMR_UREG_CHICK,
		       iwl_read_prph(trans, IMR_UREG_CHICK) |
		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
		       (u32)(src_addr & 0xFFFFFFFF));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
		       iwl_get_dma_hi_addr(src_addr));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
}

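/*
 * Start an IMR DRAM-to-SRAM copy and wait up to 5 seconds for imr_status
 * to leave IMR_D2S_REQUESTED; dump registers and fail on timeout or error.
 */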
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = -1;

	trans_pcie->imr_status = IMR_D2S_REQUESTED;
	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status !=
				 IMR_D2S_REQUESTED, 5 * HZ);
	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
		iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
		return -ETIMEDOUT;
	}
	trans_pcie->imr_status = IMR_D2S_IDLE;
	return 0;
}

/*
 * Read rf id and cdb info from prph register and store it
 */
static void get_crf_id(struct iwl_trans *iwl_trans,
		       struct iwl_trans_info *info)
{
	u32 sd_reg_ver_addr;
	u32 hw_wfpm_id;
	u32 val = 0;
	u8 step;

	if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		sd_reg_ver_addr = SD_REG_VER_GEN2;
	else
		sd_reg_ver_addr = SD_REG_VER;

	/* Enable access to peripheral registers */
	val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
	val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK;
	iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);

	/* Read crf info */
	info->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);

	/* Read cnv info */
	info->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);

	/* For BZ-W, take B step also when A step is indicated */
	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
		step = SILICON_B_STEP;

	/* In BZ, the MAC step must be read from the CNVI aux register */
	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
		step = CNVI_AUX_MISC_CHIP_MAC_STEP(info->hw_cnv_id);

		/* For BZ-U, take B step also when A step is indicated */
		if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(info->hw_cnv_id) ==
		     CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
		    step == SILICON_A_STEP)
			step = SILICON_B_STEP;
	}

	if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
	    CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
		info->hw_rev_step = step;
		info->hw_rev |= step;
	}

	/* Read cdb info (also contains the jacket info, if needed in the future) */
	hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
	IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
		 info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}

/*
 * In case there is no OTP on the NIC, map the rf id and cdb info
 * from the prph registers.
 */
static int map_crf_id(struct iwl_trans *iwl_trans,
		      struct iwl_trans_info *info)
{
	int ret = 0;
	u32 val = info->hw_crf_id;
	u32 step_id = REG_CRF_ID_STEP(val);
	u32 slave_id = REG_CRF_ID_SLAVE(val);
	u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
	u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
						    WFPM_OTP_CFG1_ADDR);
	u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
	u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);

	/* Map the crf id to an rf id */
	switch (REG_CRF_ID_TYPE(val)) {
	case REG_CRF_ID_TYPE_JF_1:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
		break;
	case REG_CRF_ID_TYPE_JF_2:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
		break;
	case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
		break;
	case REG_CRF_ID_TYPE_HR_NONE_CDB:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
		break;
	case REG_CRF_ID_TYPE_HR_CDB:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
		break;
	case REG_CRF_ID_TYPE_GF:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
		break;
	case REG_CRF_ID_TYPE_FM:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
		break;
	case REG_CRF_ID_TYPE_WHP:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
		break;
	case REG_CRF_ID_TYPE_PE:
		info->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
		break;
	default:
		ret = -EIO;
		IWL_ERR(iwl_trans,
			"Can't find a correct rfid for crf id 0x%x\n",
			REG_CRF_ID_TYPE(val));
		goto out;
	}

	/* Set Step-id */
	info->hw_rf_id |= (step_id << 8);

	/* Set CDB capabilities */
	if (cdb_id_wfpm || slave_id) {
		info->hw_rf_id += BIT(28);
		IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
	}

	/* Set Jacket capabilities */
	if (jacket_id_wfpm || jacket_id_cnv) {
		info->hw_rf_id += BIT(29);
		IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
	}

	IWL_INFO(iwl_trans,
		 "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
		 REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
	IWL_INFO(iwl_trans,
		 "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
		 cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
	IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
		 jacket_id_cnv, info->hw_cnv_id);

out:
	return ret;
}

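/*
 * Delayed-work callback: re-read CSR_HW_IF_CONFIG_REG and update
 * me_present from the IAMT_UP bit, in case the ME firmware finished
 * initializing after probe.
 */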
static void iwl_pcie_recheck_me_status(struct work_struct *wk)
{
	struct iwl_trans_pcie *trans_pcie = container_of(wk,
							 typeof(*trans_pcie),
							 me_recheck_wk.work);
	u32 val;

	val = iwl_read32(trans_pcie->trans, CSR_HW_IF_CONFIG_REG);
	trans_pcie->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
}

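/*
 * Determine whether ME firmware is present: on BZ and later, prefer the
 * CNVI "WIAMT known" indication, fall back to the ME_OWN/IAMT_UP bits in
 * CSR_HW_IF_CONFIG_REG, and if still unknown recheck later via delayed
 * work.
 */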
static void iwl_pcie_check_me_status(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	trans_pcie->me_present = -1;

	INIT_DELAYED_WORK(&trans_pcie->me_recheck_wk,
			  iwl_pcie_recheck_me_status);

	/* we don't have a good way of determining this until BZ */
	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
		return;

	val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);
	if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) {
		trans_pcie->me_present =
			!!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
		return;
	}

	val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
	if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN |
		   CSR_HW_IF_CONFIG_REG_IAMT_UP)) {
		trans_pcie->me_present = 1;
		return;
	}

	/* recheck again later, ME might still be initializing */
	schedule_delayed_work(&trans_pcie->me_recheck_wk, HZ);
}

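/*
 * Common probe path for gen1/gen2 PCIe devices: allocate the transport,
 * read the HW and RF IDs, match the device-specific configuration and
 * start the driver core.
 */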
int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent,
			 const struct iwl_mac_cfg *mac_cfg,
			 u8 __iomem *hw_base, u32 hw_rev)
{
	const struct iwl_dev_info *dev_info;
	struct iwl_trans_info info = {
		.hw_id = (pdev->device << 16) + pdev->subsystem_device,
		.hw_rev = hw_rev,
	};
	struct iwl_trans *iwl_trans;
	struct iwl_trans_pcie *trans_pcie;
	int ret;

	iwl_trans = iwl_trans_pcie_alloc(pdev, mac_cfg, &info, hw_base);
	if (IS_ERR(iwl_trans))
		return PTR_ERR(iwl_trans);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);

	iwl_trans_pcie_check_product_reset_status(pdev);
	iwl_trans_pcie_check_product_reset_mode(pdev);

	/* set the things we know so far for the grab NIC access */
	iwl_trans_set_info(iwl_trans, &info);

	/*
	 * Let's try to grab NIC access early here. Sometimes, NICs may
	 * fail to initialize, and if that happens it's better if we see
	 * issues early on (and can reprobe, per the logic inside), than
	 * first trying to load the firmware etc. and potentially only
	 * detecting any problems when the first interface is brought up.
	 */
	ret = iwl_pcie_prepare_card_hw(iwl_trans);
	if (!ret) {
		ret = iwl_finish_nic_init(iwl_trans);
		if (ret)
			goto out_free_trans;
		if (iwl_trans_grab_nic_access(iwl_trans)) {
			get_crf_id(iwl_trans, &info);
			/* all good */
			iwl_trans_release_nic_access(iwl_trans);
		} else {
			ret = -EIO;
			goto out_free_trans;
		}
	}

	info.hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);

	/*
	 * The RF_ID is set to zero in blank OTP so read version to
	 * extract the RF_ID.
	 * This is relevant only for family 9000 and up.
	 */
	if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
	    !CSR_HW_RFID_TYPE(info.hw_rf_id) && map_crf_id(iwl_trans, &info)) {
		ret = -EINVAL;
		goto out_free_trans;
	}

	IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
		 pdev->device, pdev->subsystem_device,
		 info.hw_rev, info.hw_rf_id);

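	/* Look up the device-specific config from the PCI IDs and the RF ID */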
	dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
					 CSR_HW_RFID_TYPE(info.hw_rf_id),
					 CSR_HW_RFID_IS_CDB(info.hw_rf_id),
					 IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
					 IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
					 !iwl_trans->mac_cfg->integrated);
	if (dev_info) {
		iwl_trans->cfg = dev_info->cfg;
		info.name = dev_info->name;
	}

#if IS_ENABLED(CONFIG_IWLMVM)

	/*
	 * special-case 7265D, it has the same PCI IDs.
	 *
	 * Note that because we already pass the cfg to the transport above,
	 * all the parameters that the transport uses must, until that is
	 * changed, be identical to the ones in the 7265D configuration.
	 */
	if (iwl_trans->cfg == &iwl7265_cfg &&
	    (info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
		iwl_trans->cfg = &iwl7265d_cfg;
#endif
	if (!iwl_trans->cfg) {
		pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
		       pdev->device, pdev->subsystem_device,
		       info.hw_rev, info.hw_rf_id);
		ret = -EINVAL;
		goto out_free_trans;
	}

IWL_INFO(iwl_trans, "Detected %s\n", info.name);
4263
4264
if (iwl_trans->mac_cfg->mq_rx_supported) {
4265
if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
4266
ret = -EINVAL;
4267
goto out_free_trans;
4268
}
4269
trans_pcie->num_rx_bufs = iwl_trans_get_num_rbds(iwl_trans);
4270
} else {
4271
trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
4272
}
4273
4274
if (!iwl_trans->mac_cfg->integrated) {
4275
u16 link_status;
4276
4277
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
4278
4279
info.pcie_link_speed =
4280
u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS);
4281
}
4282
4283
iwl_trans_set_info(iwl_trans, &info);
4284
4285
pci_set_drvdata(pdev, iwl_trans);
4286
4287
iwl_pcie_check_me_status(iwl_trans);
4288
4289
/* try to get ownership so that we'll know if we don't own it */
4290
iwl_pcie_prepare_card_hw(iwl_trans);
4291
4292
iwl_trans->drv = iwl_drv_start(iwl_trans);
4293
4294
if (IS_ERR(iwl_trans->drv)) {
4295
ret = PTR_ERR(iwl_trans->drv);
4296
goto out_free_trans;
4297
}
4298
4299
/* register transport layer debugfs here */
4300
iwl_trans_pcie_dbgfs_register(iwl_trans);
4301
4302
return 0;
4303
4304
out_free_trans:
4305
iwl_trans_pcie_free(iwl_trans);
4306
return ret;
4307
}
4308
4309