GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath10k/pci.c
1
// SPDX-License-Identifier: ISC
2
/*
3
* Copyright (c) 2005-2011 Atheros Communications Inc.
4
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
6
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
7
*/
8
9
#if defined(__FreeBSD__)
10
#define LINUXKPI_PARAM_PREFIX ath10k_pci_
11
#endif
12
13
#include <linux/pci.h>
14
#include <linux/module.h>
15
#include <linux/interrupt.h>
16
#include <linux/spinlock.h>
17
#include <linux/bitops.h>
18
#if defined(__FreeBSD__)
19
#include <linux/delay.h>
20
#include <sys/rman.h>
21
#endif
22
23
#include "core.h"
24
#include "debug.h"
25
#include "coredump.h"
26
27
#include "targaddrs.h"
28
#include "bmi.h"
29
30
#include "hif.h"
31
#include "htc.h"
32
33
#include "ce.h"
34
#include "pci.h"
35
36
enum ath10k_pci_reset_mode {
37
ATH10K_PCI_RESET_AUTO = 0,
38
ATH10K_PCI_RESET_WARM_ONLY = 1,
39
};
40
41
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
42
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
43
44
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
45
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
46
47
module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
48
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
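/*
 * Illustration (assumption, not part of this file): on Linux these module
 * parameters are normally given at load time, e.g.
 *	modprobe ath10k_pci irq_mode=2 reset_mode=0
 * On FreeBSD the LINUXKPI_PARAM_PREFIX defined above is expected to expose
 * them as linuxkpi tunables such as compat.linuxkpi.ath10k_pci_irq_mode.
 */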
49
50
/* how long to wait for the target to initialise, in ms */
51
#define ATH10K_PCI_TARGET_WAIT 3000
52
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
53
54
/* Maximum number of bytes that can be handled atomically by
55
* diag read and write.
56
*/
57
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
58
59
#define QCA99X0_PCIE_BAR0_START_REG 0x81030
60
#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
61
#define QCA99X0_CPU_MEM_DATA_REG 0x4d010
62
63
static const struct pci_device_id ath10k_pci_id_table[] = {
64
/* PCI-E QCA988X V2 (Ubiquiti branded) */
65
{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
66
67
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
68
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
69
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
70
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
71
{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
72
{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
73
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
74
{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
75
{}
76
};
77
78
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
79
/* QCA988X pre 2.0 chips are not supported because they need some nasty
80
* hacks. ath10k doesn't have them and these devices crash horribly
81
* because of that.
82
*/
83
{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
84
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
85
86
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
87
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
88
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
89
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
90
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
91
92
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
93
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
94
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
95
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
96
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
97
98
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
99
100
{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
101
102
{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
103
104
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
105
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
106
107
{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
108
};
109
110
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
111
static int ath10k_pci_cold_reset(struct ath10k *ar);
112
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
113
static int ath10k_pci_init_irq(struct ath10k *ar);
114
static int ath10k_pci_deinit_irq(struct ath10k *ar);
115
static int ath10k_pci_request_irq(struct ath10k *ar);
116
static void ath10k_pci_free_irq(struct ath10k *ar);
117
static int ath10k_pci_bmi_wait(struct ath10k *ar,
118
struct ath10k_ce_pipe *tx_pipe,
119
struct ath10k_ce_pipe *rx_pipe,
120
struct bmi_xfer *xfer);
121
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
122
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
123
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
124
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
125
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
126
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
127
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
128
129
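/* Host-side Copy Engine attributes, one entry per CE pipe used by the host. */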
static const struct ce_attr pci_host_ce_config_wlan[] = {
130
/* CE0: host->target HTC control and raw streams */
131
{
132
.flags = CE_ATTR_FLAGS,
133
.src_nentries = 16,
134
.src_sz_max = 256,
135
.dest_nentries = 0,
136
.send_cb = ath10k_pci_htc_tx_cb,
137
},
138
139
/* CE1: target->host HTT + HTC control */
140
{
141
.flags = CE_ATTR_FLAGS,
142
.src_nentries = 0,
143
.src_sz_max = 2048,
144
.dest_nentries = 512,
145
.recv_cb = ath10k_pci_htt_htc_rx_cb,
146
},
147
148
/* CE2: target->host WMI */
149
{
150
.flags = CE_ATTR_FLAGS,
151
.src_nentries = 0,
152
.src_sz_max = 2048,
153
.dest_nentries = 128,
154
.recv_cb = ath10k_pci_htc_rx_cb,
155
},
156
157
/* CE3: host->target WMI */
158
{
159
.flags = CE_ATTR_FLAGS,
160
.src_nentries = 32,
161
.src_sz_max = 2048,
162
.dest_nentries = 0,
163
.send_cb = ath10k_pci_htc_tx_cb,
164
},
165
166
/* CE4: host->target HTT */
167
{
168
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
169
.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
170
.src_sz_max = 256,
171
.dest_nentries = 0,
172
.send_cb = ath10k_pci_htt_tx_cb,
173
},
174
175
/* CE5: target->host HTT (HIF->HTT) */
176
{
177
.flags = CE_ATTR_FLAGS,
178
.src_nentries = 0,
179
.src_sz_max = 512,
180
.dest_nentries = 512,
181
.recv_cb = ath10k_pci_htt_rx_cb,
182
},
183
184
/* CE6: target autonomous hif_memcpy */
185
{
186
.flags = CE_ATTR_FLAGS,
187
.src_nentries = 0,
188
.src_sz_max = 0,
189
.dest_nentries = 0,
190
},
191
192
/* CE7: ce_diag, the Diagnostic Window */
193
{
194
.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
195
.src_nentries = 2,
196
.src_sz_max = DIAG_TRANSFER_LIMIT,
197
.dest_nentries = 2,
198
},
199
200
/* CE8: target->host pktlog */
201
{
202
.flags = CE_ATTR_FLAGS,
203
.src_nentries = 0,
204
.src_sz_max = 2048,
205
.dest_nentries = 128,
206
.recv_cb = ath10k_pci_pktlog_rx_cb,
207
},
208
209
/* CE9 target autonomous qcache memcpy */
210
{
211
.flags = CE_ATTR_FLAGS,
212
.src_nentries = 0,
213
.src_sz_max = 0,
214
.dest_nentries = 0,
215
},
216
217
/* CE10: target autonomous hif memcpy */
218
{
219
.flags = CE_ATTR_FLAGS,
220
.src_nentries = 0,
221
.src_sz_max = 0,
222
.dest_nentries = 0,
223
},
224
225
/* CE11: target autonomous hif memcpy */
226
{
227
.flags = CE_ATTR_FLAGS,
228
.src_nentries = 0,
229
.src_sz_max = 0,
230
.dest_nentries = 0,
231
},
232
};
233
234
/* Target firmware's Copy Engine configuration. */
235
static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
236
/* CE0: host->target HTC control and raw streams */
237
{
238
.pipenum = __cpu_to_le32(0),
239
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
240
.nentries = __cpu_to_le32(32),
241
.nbytes_max = __cpu_to_le32(256),
242
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
243
.reserved = __cpu_to_le32(0),
244
},
245
246
/* CE1: target->host HTT + HTC control */
247
{
248
.pipenum = __cpu_to_le32(1),
249
.pipedir = __cpu_to_le32(PIPEDIR_IN),
250
.nentries = __cpu_to_le32(32),
251
.nbytes_max = __cpu_to_le32(2048),
252
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
253
.reserved = __cpu_to_le32(0),
254
},
255
256
/* CE2: target->host WMI */
257
{
258
.pipenum = __cpu_to_le32(2),
259
.pipedir = __cpu_to_le32(PIPEDIR_IN),
260
.nentries = __cpu_to_le32(64),
261
.nbytes_max = __cpu_to_le32(2048),
262
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
263
.reserved = __cpu_to_le32(0),
264
},
265
266
/* CE3: host->target WMI */
267
{
268
.pipenum = __cpu_to_le32(3),
269
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
270
.nentries = __cpu_to_le32(32),
271
.nbytes_max = __cpu_to_le32(2048),
272
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
273
.reserved = __cpu_to_le32(0),
274
},
275
276
/* CE4: host->target HTT */
277
{
278
.pipenum = __cpu_to_le32(4),
279
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
280
.nentries = __cpu_to_le32(256),
281
.nbytes_max = __cpu_to_le32(256),
282
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
283
.reserved = __cpu_to_le32(0),
284
},
285
286
/* NB: 50% of src nentries, since tx has 2 frags */
287
288
/* CE5: target->host HTT (HIF->HTT) */
289
{
290
.pipenum = __cpu_to_le32(5),
291
.pipedir = __cpu_to_le32(PIPEDIR_IN),
292
.nentries = __cpu_to_le32(32),
293
.nbytes_max = __cpu_to_le32(512),
294
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
295
.reserved = __cpu_to_le32(0),
296
},
297
298
/* CE6: Reserved for target autonomous hif_memcpy */
299
{
300
.pipenum = __cpu_to_le32(6),
301
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
302
.nentries = __cpu_to_le32(32),
303
.nbytes_max = __cpu_to_le32(4096),
304
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
305
.reserved = __cpu_to_le32(0),
306
},
307
308
/* CE7 used only by Host */
309
{
310
.pipenum = __cpu_to_le32(7),
311
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
312
.nentries = __cpu_to_le32(0),
313
.nbytes_max = __cpu_to_le32(0),
314
.flags = __cpu_to_le32(0),
315
.reserved = __cpu_to_le32(0),
316
},
317
318
	/* CE8 target->host pktlog */
319
{
320
.pipenum = __cpu_to_le32(8),
321
.pipedir = __cpu_to_le32(PIPEDIR_IN),
322
.nentries = __cpu_to_le32(64),
323
.nbytes_max = __cpu_to_le32(2048),
324
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
325
.reserved = __cpu_to_le32(0),
326
},
327
328
/* CE9 target autonomous qcache memcpy */
329
{
330
.pipenum = __cpu_to_le32(9),
331
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
332
.nentries = __cpu_to_le32(32),
333
.nbytes_max = __cpu_to_le32(2048),
334
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
335
.reserved = __cpu_to_le32(0),
336
},
337
338
	/* It is not necessary to send target wlan configuration for CE10 & CE11
339
* as these CEs are not actively used in target.
340
*/
341
};
342
343
/*
344
* Map from service/endpoint to Copy Engine.
345
* This table is derived from the CE_PCI TABLE, above.
346
* It is passed to the Target at startup for use by firmware.
347
*/
348
static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
349
{
350
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
351
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
352
__cpu_to_le32(3),
353
},
354
{
355
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
356
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
357
__cpu_to_le32(2),
358
},
359
{
360
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
361
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
362
__cpu_to_le32(3),
363
},
364
{
365
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
366
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
367
__cpu_to_le32(2),
368
},
369
{
370
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
371
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
372
__cpu_to_le32(3),
373
},
374
{
375
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
376
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
377
__cpu_to_le32(2),
378
},
379
{
380
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
381
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
382
__cpu_to_le32(3),
383
},
384
{
385
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
386
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
387
__cpu_to_le32(2),
388
},
389
{
390
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
391
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
392
__cpu_to_le32(3),
393
},
394
{
395
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
396
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
397
__cpu_to_le32(2),
398
},
399
{
400
__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
401
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
402
__cpu_to_le32(0),
403
},
404
{
405
__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
406
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
407
__cpu_to_le32(1),
408
},
409
{ /* not used */
410
__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
411
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
412
__cpu_to_le32(0),
413
},
414
{ /* not used */
415
__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
416
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
417
__cpu_to_le32(1),
418
},
419
{
420
__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
421
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
422
__cpu_to_le32(4),
423
},
424
{
425
__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
426
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
427
__cpu_to_le32(5),
428
},
429
430
/* (Additions here) */
431
432
{ /* must be last */
433
__cpu_to_le32(0),
434
__cpu_to_le32(0),
435
__cpu_to_le32(0),
436
},
437
};
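/*
 * Reading the table above: the WMI control service, for example, sends on
 * CE3 (host->target) and receives on CE2 (target->host), while HTT data
 * messages go out on CE4 and come back in on CE5.
 */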
438
439
static bool ath10k_pci_is_awake(struct ath10k *ar)
440
{
441
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
442
#if defined(__linux__)
443
u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
444
RTC_STATE_ADDRESS);
445
#elif defined(__FreeBSD__)
446
u32 val = bus_read_4((struct resource *)ar_pci->mem, PCIE_LOCAL_BASE_ADDRESS +
447
RTC_STATE_ADDRESS);
448
#endif
449
450
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
451
}
452
453
static void __ath10k_pci_wake(struct ath10k *ar)
454
{
455
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
456
457
lockdep_assert_held(&ar_pci->ps_lock);
458
459
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
460
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
461
462
#if defined(__linux__)
463
iowrite32(PCIE_SOC_WAKE_V_MASK,
464
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
465
PCIE_SOC_WAKE_ADDRESS);
466
#elif defined(__FreeBSD__)
467
bus_write_4((struct resource *)ar_pci->mem,
468
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
469
PCIE_SOC_WAKE_V_MASK);
470
#endif
471
}
472
473
static void __ath10k_pci_sleep(struct ath10k *ar)
474
{
475
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
476
477
lockdep_assert_held(&ar_pci->ps_lock);
478
479
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
480
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
481
482
#if defined(__linux__)
483
iowrite32(PCIE_SOC_WAKE_RESET,
484
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
485
PCIE_SOC_WAKE_ADDRESS);
486
#elif defined(__FreeBSD__)
487
bus_write_4((struct resource *)ar_pci->mem,
488
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
489
PCIE_SOC_WAKE_RESET);
490
#endif
491
ar_pci->ps_awake = false;
492
}
493
494
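/*
 * Poll the RTC state until the device reports it is awake.  The delay per
 * iteration starts at 5 us and grows by 5 us up to 50 us, bounded overall
 * by PCIE_WAKE_TIMEOUT; a warning is printed if waking up took longer than
 * PCIE_WAKE_LATE_US.
 */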
static int ath10k_pci_wake_wait(struct ath10k *ar)
495
{
496
int tot_delay = 0;
497
int curr_delay = 5;
498
499
while (tot_delay < PCIE_WAKE_TIMEOUT) {
500
if (ath10k_pci_is_awake(ar)) {
501
if (tot_delay > PCIE_WAKE_LATE_US)
502
ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
503
tot_delay / 1000);
504
return 0;
505
}
506
507
udelay(curr_delay);
508
tot_delay += curr_delay;
509
510
if (curr_delay < 50)
511
curr_delay += 5;
512
}
513
514
return -ETIMEDOUT;
515
}
516
517
static int ath10k_pci_force_wake(struct ath10k *ar)
518
{
519
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
520
unsigned long flags;
521
int ret = 0;
522
523
if (ar_pci->pci_ps)
524
return ret;
525
526
spin_lock_irqsave(&ar_pci->ps_lock, flags);
527
528
if (!ar_pci->ps_awake) {
529
#if defined(__linux__)
530
iowrite32(PCIE_SOC_WAKE_V_MASK,
531
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
532
PCIE_SOC_WAKE_ADDRESS);
533
#elif defined(__FreeBSD__)
534
bus_write_4((struct resource *)ar_pci->mem,
535
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
536
PCIE_SOC_WAKE_V_MASK);
537
#endif
538
539
ret = ath10k_pci_wake_wait(ar);
540
if (ret == 0)
541
ar_pci->ps_awake = true;
542
}
543
544
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
545
546
return ret;
547
}
548
549
static void ath10k_pci_force_sleep(struct ath10k *ar)
550
{
551
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
552
unsigned long flags;
553
554
spin_lock_irqsave(&ar_pci->ps_lock, flags);
555
556
#if defined(__linux__)
557
iowrite32(PCIE_SOC_WAKE_RESET,
558
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
559
PCIE_SOC_WAKE_ADDRESS);
560
#elif defined(__FreeBSD__)
561
bus_write_4((struct resource *)ar_pci->mem,
562
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
563
PCIE_SOC_WAKE_RESET);
564
#endif
565
ar_pci->ps_awake = false;
566
567
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
568
}
569
570
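/*
 * Power-save aware register access: ath10k_pci_wake() wakes the device (if
 * the cached ps_awake state says it is asleep) and takes a reference;
 * ath10k_pci_sleep() drops the reference and re-arms ps_timer, whose
 * callback only puts the device back to sleep once the refcount has
 * reached zero and the grace period has expired.
 */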
static int ath10k_pci_wake(struct ath10k *ar)
571
{
572
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
573
unsigned long flags;
574
int ret = 0;
575
576
if (ar_pci->pci_ps == 0)
577
return ret;
578
579
spin_lock_irqsave(&ar_pci->ps_lock, flags);
580
581
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
582
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
583
584
/* This function can be called very frequently. To avoid excessive
585
* CPU stalls for MMIO reads use a cache var to hold the device state.
586
*/
587
if (!ar_pci->ps_awake) {
588
__ath10k_pci_wake(ar);
589
590
ret = ath10k_pci_wake_wait(ar);
591
if (ret == 0)
592
ar_pci->ps_awake = true;
593
}
594
595
if (ret == 0) {
596
ar_pci->ps_wake_refcount++;
597
WARN_ON(ar_pci->ps_wake_refcount == 0);
598
}
599
600
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
601
602
return ret;
603
}
604
605
static void ath10k_pci_sleep(struct ath10k *ar)
606
{
607
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
608
unsigned long flags;
609
610
if (ar_pci->pci_ps == 0)
611
return;
612
613
spin_lock_irqsave(&ar_pci->ps_lock, flags);
614
615
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
616
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
617
618
if (WARN_ON(ar_pci->ps_wake_refcount == 0))
619
goto skip;
620
621
ar_pci->ps_wake_refcount--;
622
623
mod_timer(&ar_pci->ps_timer, jiffies +
624
msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
625
626
skip:
627
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
628
}
629
630
static void ath10k_pci_ps_timer(struct timer_list *t)
631
{
632
struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer);
633
struct ath10k *ar = ar_pci->ar;
634
unsigned long flags;
635
636
spin_lock_irqsave(&ar_pci->ps_lock, flags);
637
638
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
639
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
640
641
if (ar_pci->ps_wake_refcount > 0)
642
goto skip;
643
644
__ath10k_pci_sleep(ar);
645
646
skip:
647
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
648
}
649
650
static void ath10k_pci_sleep_sync(struct ath10k *ar)
651
{
652
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
653
unsigned long flags;
654
655
if (ar_pci->pci_ps == 0) {
656
ath10k_pci_force_sleep(ar);
657
return;
658
}
659
660
timer_delete_sync(&ar_pci->ps_timer);
661
662
spin_lock_irqsave(&ar_pci->ps_lock, flags);
663
WARN_ON(ar_pci->ps_wake_refcount > 0);
664
__ath10k_pci_sleep(ar);
665
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
666
}
667
668
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
669
{
670
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
671
int ret;
672
673
if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
674
ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
675
offset, offset + sizeof(value), ar_pci->mem_len);
676
return;
677
}
678
679
ret = ath10k_pci_wake(ar);
680
if (ret) {
681
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
682
value, offset, ret);
683
return;
684
}
685
686
#if defined(__linux__)
687
iowrite32(value, ar_pci->mem + offset);
688
#elif defined(__FreeBSD__)
689
bus_write_4((struct resource *)ar_pci->mem, offset, value);
690
#endif
691
ath10k_pci_sleep(ar);
692
}
693
694
static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
695
{
696
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
697
u32 val;
698
int ret;
699
700
if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
701
ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
702
offset, offset + sizeof(val), ar_pci->mem_len);
703
return 0;
704
}
705
706
ret = ath10k_pci_wake(ar);
707
if (ret) {
708
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
709
offset, ret);
710
return 0xffffffff;
711
}
712
713
#if defined(__linux__)
714
val = ioread32(ar_pci->mem + offset);
715
#elif defined(__FreeBSD__)
716
val = bus_read_4((struct resource *)ar_pci->mem, offset);
717
#endif
718
ath10k_pci_sleep(ar);
719
720
return val;
721
}
722
723
inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
724
{
725
struct ath10k_ce *ce = ath10k_ce_priv(ar);
726
727
ce->bus_ops->write32(ar, offset, value);
728
}
729
730
inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
731
{
732
struct ath10k_ce *ce = ath10k_ce_priv(ar);
733
734
return ce->bus_ops->read32(ar, offset);
735
}
736
737
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
738
{
739
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
740
}
741
742
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
743
{
744
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
745
}
746
747
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
748
{
749
return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
750
}
751
752
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
753
{
754
ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
755
}
756
757
bool ath10k_pci_irq_pending(struct ath10k *ar)
758
{
759
u32 cause;
760
761
/* Check if the shared legacy irq is for us */
762
cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
763
PCIE_INTR_CAUSE_ADDRESS);
764
if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
765
return true;
766
767
return false;
768
}
769
770
void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar)
771
{
772
/* IMPORTANT: INTR_CLR register has to be set after
773
	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
774
	 * cleared properly.
775
*/
776
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
777
0);
778
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
779
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
780
781
/* IMPORTANT: this extra read transaction is required to
782
* flush the posted write buffer.
783
*/
784
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
785
PCIE_INTR_ENABLE_ADDRESS);
786
}
787
788
void ath10k_pci_enable_intx_irq(struct ath10k *ar)
789
{
790
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
791
PCIE_INTR_ENABLE_ADDRESS,
792
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
793
794
/* IMPORTANT: this extra read transaction is required to
795
* flush the posted write buffer.
796
*/
797
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
798
PCIE_INTR_ENABLE_ADDRESS);
799
}
800
801
static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
802
{
803
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
804
805
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
806
return "msi";
807
808
return "legacy";
809
}
810
811
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
812
{
813
struct ath10k *ar = pipe->hif_ce_state;
814
struct ath10k_ce *ce = ath10k_ce_priv(ar);
815
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
816
struct sk_buff *skb;
817
dma_addr_t paddr;
818
int ret;
819
820
skb = dev_alloc_skb(pipe->buf_sz);
821
if (!skb)
822
return -ENOMEM;
823
824
WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
825
826
paddr = dma_map_single(ar->dev, skb->data,
827
skb->len + skb_tailroom(skb),
828
DMA_FROM_DEVICE);
829
if (unlikely(dma_mapping_error(ar->dev, paddr))) {
830
ath10k_warn(ar, "failed to dma map pci rx buf\n");
831
dev_kfree_skb_any(skb);
832
return -EIO;
833
}
834
835
ATH10K_SKB_RXCB(skb)->paddr = paddr;
836
837
spin_lock_bh(&ce->ce_lock);
838
ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
839
spin_unlock_bh(&ce->ce_lock);
840
if (ret) {
841
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
842
DMA_FROM_DEVICE);
843
dev_kfree_skb_any(skb);
844
return ret;
845
}
846
847
return 0;
848
}
849
850
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
851
{
852
struct ath10k *ar = pipe->hif_ce_state;
853
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
854
struct ath10k_ce *ce = ath10k_ce_priv(ar);
855
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
856
int ret, num;
857
858
if (pipe->buf_sz == 0)
859
return;
860
861
if (!ce_pipe->dest_ring)
862
return;
863
864
spin_lock_bh(&ce->ce_lock);
865
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
866
spin_unlock_bh(&ce->ce_lock);
867
868
while (num >= 0) {
869
ret = __ath10k_pci_rx_post_buf(pipe);
870
if (ret) {
871
if (ret == -ENOSPC)
872
break;
873
ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
874
mod_timer(&ar_pci->rx_post_retry, jiffies +
875
ATH10K_PCI_RX_POST_RETRY_MS);
876
break;
877
}
878
num--;
879
}
880
}
881
882
void ath10k_pci_rx_post(struct ath10k *ar)
883
{
884
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
885
int i;
886
887
for (i = 0; i < CE_COUNT; i++)
888
ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
889
}
890
891
void ath10k_pci_rx_replenish_retry(struct timer_list *t)
892
{
893
struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t,
894
rx_post_retry);
895
struct ath10k *ar = ar_pci->ar;
896
897
ath10k_pci_rx_post(ar);
898
}
899
900
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
901
{
902
u32 val = 0, region = addr & 0xfffff;
903
904
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
905
& 0x7ff) << 21;
906
val |= 0x100000 | region;
907
return val;
908
}
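/*
 * Illustrative example: with CORE_CTRL[10:0] == 0, target CPU address
 * 0x00401234 yields region 0x01234 and a converted CE address of
 * 0x101234 (bit 20 set, low 20 bits preserved).
 */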
909
910
/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
911
 * Supports access to target space below 1M for qca6174 and qca9377.
912
* If target space is below 1M, the bit[20] of converted CE addr is 0.
913
* Otherwise bit[20] of converted CE addr is 1.
914
*/
915
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
916
{
917
u32 val = 0, region = addr & 0xfffff;
918
919
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
920
& 0x7ff) << 21;
921
val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
922
return val;
923
}
924
925
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
926
{
927
u32 val = 0, region = addr & 0xfffff;
928
929
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
930
val |= 0x100000 | region;
931
return val;
932
}
933
934
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
935
{
936
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
937
938
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
939
return -EOPNOTSUPP;
940
941
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
942
}
943
944
/*
945
* Diagnostic read/write access is provided for startup/config/debug usage.
946
* Caller must guarantee proper alignment, when applicable, and single user
947
* at any moment.
948
*/
949
#if defined(__linux__)
950
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
951
#elif defined(__FreeBSD__)
952
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, u8 *data,
953
#endif
954
int nbytes)
955
{
956
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
957
int ret = 0;
958
u32 *buf;
959
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
960
struct ath10k_ce_pipe *ce_diag;
961
/* Host buffer address in CE space */
962
u32 ce_data;
963
dma_addr_t ce_data_base = 0;
964
void *data_buf;
965
int i;
966
967
mutex_lock(&ar_pci->ce_diag_mutex);
968
ce_diag = ar_pci->ce_diag;
969
970
/*
971
* Allocate a temporary bounce buffer to hold caller's data
972
* to be DMA'ed from Target. This guarantees
973
* 1) 4-byte alignment
974
* 2) Buffer in DMA-able space
975
*/
976
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
977
978
data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
979
GFP_ATOMIC);
980
if (!data_buf) {
981
ret = -ENOMEM;
982
goto done;
983
}
984
985
/* The address supplied by the caller is in the
986
* Target CPU virtual address space.
987
*
988
* In order to use this address with the diagnostic CE,
989
* convert it from Target CPU virtual address space
990
* to CE address space
991
*/
992
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
993
994
remaining_bytes = nbytes;
995
ce_data = ce_data_base;
996
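	/*
	 * Each iteration moves at most DIAG_TRANSFER_LIMIT bytes: the host
	 * bounce buffer is posted as the CE destination and the converted
	 * target address is handed to ath10k_ce_send() as the source, so the
	 * diag CE copies target memory into the bounce buffer, which is then
	 * memcpy'd into the caller's buffer.
	 */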
while (remaining_bytes) {
997
nbytes = min_t(unsigned int, remaining_bytes,
998
DIAG_TRANSFER_LIMIT);
999
1000
ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
1001
if (ret != 0)
1002
goto done;
1003
1004
/* Request CE to send from Target(!) address to Host buffer */
1005
ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
1006
if (ret)
1007
goto done;
1008
1009
i = 0;
1010
while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1011
udelay(DIAG_ACCESS_CE_WAIT_US);
1012
i += DIAG_ACCESS_CE_WAIT_US;
1013
1014
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1015
ret = -EBUSY;
1016
goto done;
1017
}
1018
}
1019
1020
i = 0;
1021
while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1022
&completed_nbytes) != 0) {
1023
udelay(DIAG_ACCESS_CE_WAIT_US);
1024
i += DIAG_ACCESS_CE_WAIT_US;
1025
1026
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1027
ret = -EBUSY;
1028
goto done;
1029
}
1030
}
1031
1032
if (nbytes != completed_nbytes) {
1033
ret = -EIO;
1034
goto done;
1035
}
1036
1037
if (*buf != ce_data) {
1038
ret = -EIO;
1039
goto done;
1040
}
1041
1042
remaining_bytes -= nbytes;
1043
memcpy(data, data_buf, nbytes);
1044
1045
address += nbytes;
1046
data += nbytes;
1047
}
1048
1049
done:
1050
1051
if (data_buf)
1052
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1053
ce_data_base);
1054
1055
mutex_unlock(&ar_pci->ce_diag_mutex);
1056
1057
return ret;
1058
}
1059
1060
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1061
{
1062
__le32 val = 0;
1063
int ret;
1064
1065
#if defined(__linux__)
1066
ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1067
#elif defined(__FreeBSD__)
1068
ret = ath10k_pci_diag_read_mem(ar, address, (u8 *)&val, sizeof(val));
1069
#endif
1070
*value = __le32_to_cpu(val);
1071
1072
return ret;
1073
}
1074
1075
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1076
u32 src, u32 len)
1077
{
1078
u32 host_addr, addr;
1079
int ret;
1080
1081
host_addr = host_interest_item_address(src);
1082
1083
ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1084
if (ret != 0) {
1085
ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1086
src, ret);
1087
return ret;
1088
}
1089
1090
ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1091
if (ret != 0) {
1092
ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1093
addr, len, ret);
1094
return ret;
1095
}
1096
1097
return 0;
1098
}
1099
1100
#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
1101
__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1102
1103
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1104
#if defined(__linux__)
1105
const void *data, int nbytes)
1106
#elif defined(__FreeBSD__)
1107
const void *_d, int nbytes)
1108
#endif
1109
{
1110
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1111
int ret = 0;
1112
u32 *buf;
1113
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
1114
struct ath10k_ce_pipe *ce_diag;
1115
void *data_buf;
1116
dma_addr_t ce_data_base = 0;
1117
int i;
1118
#if defined(__FreeBSD__)
1119
const u8 *data = _d;
1120
#endif
1121
1122
mutex_lock(&ar_pci->ce_diag_mutex);
1123
ce_diag = ar_pci->ce_diag;
1124
1125
/*
1126
* Allocate a temporary bounce buffer to hold caller's data
1127
* to be DMA'ed to Target. This guarantees
1128
* 1) 4-byte alignment
1129
* 2) Buffer in DMA-able space
1130
*/
1131
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
1132
1133
data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
1134
GFP_ATOMIC);
1135
if (!data_buf) {
1136
ret = -ENOMEM;
1137
goto done;
1138
}
1139
1140
/*
1141
* The address supplied by the caller is in the
1142
* Target CPU virtual address space.
1143
*
1144
* In order to use this address with the diagnostic CE,
1145
* convert it from
1146
* Target CPU virtual address space
1147
* to
1148
* CE address space
1149
*/
1150
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1151
1152
remaining_bytes = nbytes;
1153
while (remaining_bytes) {
1154
/* FIXME: check cast */
1155
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1156
1157
/* Copy caller's data to allocated DMA buf */
1158
memcpy(data_buf, data, nbytes);
1159
1160
/* Set up to receive directly into Target(!) address */
1161
ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
1162
if (ret != 0)
1163
goto done;
1164
1165
/*
1166
* Request CE to send caller-supplied data that
1167
* was copied to bounce buffer to Target(!) address.
1168
*/
1169
ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
1170
if (ret != 0)
1171
goto done;
1172
1173
i = 0;
1174
while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1175
udelay(DIAG_ACCESS_CE_WAIT_US);
1176
i += DIAG_ACCESS_CE_WAIT_US;
1177
1178
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1179
ret = -EBUSY;
1180
goto done;
1181
}
1182
}
1183
1184
i = 0;
1185
while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1186
&completed_nbytes) != 0) {
1187
udelay(DIAG_ACCESS_CE_WAIT_US);
1188
i += DIAG_ACCESS_CE_WAIT_US;
1189
1190
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1191
ret = -EBUSY;
1192
goto done;
1193
}
1194
}
1195
1196
if (nbytes != completed_nbytes) {
1197
ret = -EIO;
1198
goto done;
1199
}
1200
1201
if (*buf != address) {
1202
ret = -EIO;
1203
goto done;
1204
}
1205
1206
remaining_bytes -= nbytes;
1207
address += nbytes;
1208
data += nbytes;
1209
}
1210
1211
done:
1212
if (data_buf) {
1213
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1214
ce_data_base);
1215
}
1216
1217
if (ret != 0)
1218
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1219
address, ret);
1220
1221
mutex_unlock(&ar_pci->ce_diag_mutex);
1222
1223
return ret;
1224
}
1225
1226
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1227
{
1228
__le32 val = __cpu_to_le32(value);
1229
1230
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1231
}
1232
1233
/* Called by lower (CE) layer when a send to Target completes. */
1234
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1235
{
1236
struct ath10k *ar = ce_state->ar;
1237
struct sk_buff_head list;
1238
struct sk_buff *skb;
1239
1240
__skb_queue_head_init(&list);
1241
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1242
/* no need to call tx completion for NULL pointers */
1243
if (skb == NULL)
1244
continue;
1245
1246
__skb_queue_tail(&list, skb);
1247
}
1248
1249
while ((skb = __skb_dequeue(&list)))
1250
ath10k_htc_tx_completion_handler(ar, skb);
1251
}
1252
1253
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1254
void (*callback)(struct ath10k *ar,
1255
struct sk_buff *skb))
1256
{
1257
struct ath10k *ar = ce_state->ar;
1258
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1259
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1260
struct sk_buff *skb;
1261
struct sk_buff_head list;
1262
void *transfer_context;
1263
unsigned int nbytes, max_nbytes;
1264
1265
__skb_queue_head_init(&list);
1266
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1267
&nbytes) == 0) {
1268
skb = transfer_context;
1269
max_nbytes = skb->len + skb_tailroom(skb);
1270
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1271
max_nbytes, DMA_FROM_DEVICE);
1272
1273
if (unlikely(max_nbytes < nbytes)) {
1274
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1275
nbytes, max_nbytes);
1276
dev_kfree_skb_any(skb);
1277
continue;
1278
}
1279
1280
skb_put(skb, nbytes);
1281
__skb_queue_tail(&list, skb);
1282
}
1283
1284
while ((skb = __skb_dequeue(&list))) {
1285
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1286
ce_state->id, skb->len);
1287
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1288
skb->data, skb->len);
1289
1290
callback(ar, skb);
1291
}
1292
1293
ath10k_pci_rx_post_pipe(pipe_info);
1294
}
1295
1296
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1297
void (*callback)(struct ath10k *ar,
1298
struct sk_buff *skb))
1299
{
1300
struct ath10k *ar = ce_state->ar;
1301
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1302
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1303
struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1304
struct sk_buff *skb;
1305
struct sk_buff_head list;
1306
void *transfer_context;
1307
unsigned int nbytes, max_nbytes, nentries;
1308
int orig_len;
1309
1310
/* No need to acquire ce_lock for CE5, since this is the only place CE5
1311
* is processed other than init and deinit. Before releasing CE5
1312
* buffers, interrupts are disabled. Thus CE5 access is serialized.
1313
*/
1314
__skb_queue_head_init(&list);
1315
while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1316
&nbytes) == 0) {
1317
skb = transfer_context;
1318
max_nbytes = skb->len + skb_tailroom(skb);
1319
1320
if (unlikely(max_nbytes < nbytes)) {
1321
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1322
nbytes, max_nbytes);
1323
continue;
1324
}
1325
1326
dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1327
max_nbytes, DMA_FROM_DEVICE);
1328
skb_put(skb, nbytes);
1329
__skb_queue_tail(&list, skb);
1330
}
1331
1332
nentries = skb_queue_len(&list);
1333
while ((skb = __skb_dequeue(&list))) {
1334
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1335
ce_state->id, skb->len);
1336
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1337
skb->data, skb->len);
1338
1339
orig_len = skb->len;
1340
callback(ar, skb);
1341
skb_push(skb, orig_len - skb->len);
1342
skb_reset_tail_pointer(skb);
1343
skb_trim(skb, 0);
1344
1345
		/* let the device gain the buffer again */
1346
dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1347
skb->len + skb_tailroom(skb),
1348
DMA_FROM_DEVICE);
1349
}
1350
ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1351
}
1352
1353
/* Called by lower (CE) layer when data is received from the Target. */
1354
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1355
{
1356
ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1357
}
1358
1359
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1360
{
1361
	/* CE4 polling needs to be done whenever the CE pipe which transports
1362
* HTT Rx (target->host) is processed.
1363
*/
1364
ath10k_ce_per_engine_service(ce_state->ar, 4);
1365
1366
ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1367
}
1368
1369
/* Called by lower (CE) layer when data is received from the Target.
1370
 * Only 10.4 firmware uses a separate CE to transfer pktlog data.
1371
*/
1372
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1373
{
1374
ath10k_pci_process_rx_cb(ce_state,
1375
ath10k_htt_rx_pktlog_completion_handler);
1376
}
1377
1378
/* Called by lower (CE) layer when a send to HTT Target completes. */
1379
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1380
{
1381
struct ath10k *ar = ce_state->ar;
1382
struct sk_buff *skb;
1383
1384
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1385
/* no need to call tx completion for NULL pointers */
1386
if (!skb)
1387
continue;
1388
1389
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1390
skb->len, DMA_TO_DEVICE);
1391
ath10k_htt_hif_tx_complete(ar, skb);
1392
}
1393
}
1394
1395
static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1396
{
1397
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1398
ath10k_htt_t2h_msg_handler(ar, skb);
1399
}
1400
1401
/* Called by lower (CE) layer when HTT data is received from the Target. */
1402
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1403
{
1404
/* CE4 polling needs to be done whenever CE pipe which transports
1405
* HTT Rx (target->host) is processed.
1406
*/
1407
ath10k_ce_per_engine_service(ce_state->ar, 4);
1408
1409
ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1410
}
1411
1412
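/*
 * Scatter-gather send: all but the last item are queued with
 * CE_SEND_FLAG_GATHER; the final item is sent without the flag, which
 * terminates the gather list and lets the copy engine process the items
 * as a single transfer.
 */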
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1413
struct ath10k_hif_sg_item *items, int n_items)
1414
{
1415
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1416
struct ath10k_ce *ce = ath10k_ce_priv(ar);
1417
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1418
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1419
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1420
unsigned int nentries_mask;
1421
unsigned int sw_index;
1422
unsigned int write_index;
1423
int err, i = 0;
1424
1425
spin_lock_bh(&ce->ce_lock);
1426
1427
nentries_mask = src_ring->nentries_mask;
1428
sw_index = src_ring->sw_index;
1429
write_index = src_ring->write_index;
1430
1431
if (unlikely(CE_RING_DELTA(nentries_mask,
1432
write_index, sw_index - 1) < n_items)) {
1433
err = -ENOBUFS;
1434
goto err;
1435
}
1436
1437
for (i = 0; i < n_items - 1; i++) {
1438
ath10k_dbg(ar, ATH10K_DBG_PCI,
1439
"pci tx item %d paddr %pad len %d n_items %d\n",
1440
i, &items[i].paddr, items[i].len, n_items);
1441
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1442
items[i].vaddr, items[i].len);
1443
1444
err = ath10k_ce_send_nolock(ce_pipe,
1445
items[i].transfer_context,
1446
items[i].paddr,
1447
items[i].len,
1448
items[i].transfer_id,
1449
CE_SEND_FLAG_GATHER);
1450
if (err)
1451
goto err;
1452
}
1453
1454
	/* `i` is equal to `n_items - 1` after the for() loop */
1455
1456
ath10k_dbg(ar, ATH10K_DBG_PCI,
1457
#if defined(__linux__)
1458
"pci tx item %d paddr %pad len %d n_items %d\n",
1459
i, &items[i].paddr, items[i].len, n_items);
1460
#elif defined(__FreeBSD__)
1461
"pci tx item %d paddr %pad len %d n_items %d pipe_id %u\n",
1462
i, &items[i].paddr, items[i].len, n_items, pipe_id);
1463
/*
1464
* XXX-BZ specific debug; the DELAY makes things work for one chipset.
1465
* There's likely a race somewhere (here or LinuxKPI).
1466
*/
1467
if (n_items == 1 && items[i].len == 140) {
1468
ath10k_dbg_dump(ar, ATH10K_DBG_PCI, NULL, "pci tx data: ",
1469
items[i].vaddr, items[i].len);
1470
dump_stack();
1471
DELAY(500);
1472
}
1473
#endif
1474
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1475
items[i].vaddr, items[i].len);
1476
1477
err = ath10k_ce_send_nolock(ce_pipe,
1478
items[i].transfer_context,
1479
items[i].paddr,
1480
items[i].len,
1481
items[i].transfer_id,
1482
0);
1483
if (err)
1484
goto err;
1485
1486
spin_unlock_bh(&ce->ce_lock);
1487
return 0;
1488
1489
err:
1490
for (; i > 0; i--)
1491
__ath10k_ce_send_revert(ce_pipe);
1492
1493
spin_unlock_bh(&ce->ce_lock);
1494
return err;
1495
}
1496
1497
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1498
size_t buf_len)
1499
{
1500
return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1501
}
1502
1503
u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1504
{
1505
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1506
1507
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1508
1509
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1510
}
1511
1512
static void ath10k_pci_dump_registers(struct ath10k *ar,
1513
struct ath10k_fw_crash_data *crash_data)
1514
{
1515
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1516
int i, ret;
1517
1518
lockdep_assert_held(&ar->dump_mutex);
1519
1520
ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1521
hi_failure_state,
1522
REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1523
if (ret) {
1524
ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1525
return;
1526
}
1527
1528
BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1529
1530
ath10k_err(ar, "firmware register dump:\n");
1531
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1532
ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1533
i,
1534
__le32_to_cpu(reg_dump_values[i]),
1535
__le32_to_cpu(reg_dump_values[i + 1]),
1536
__le32_to_cpu(reg_dump_values[i + 2]),
1537
__le32_to_cpu(reg_dump_values[i + 3]));
1538
1539
if (!crash_data)
1540
return;
1541
1542
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1543
crash_data->registers[i] = reg_dump_values[i];
1544
}
1545
1546
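/*
 * Walk the region's section table: each section is copied out via the diag
 * interface, gaps before the first section and between sections are filled
 * with ATH10K_MAGIC_NOT_COPIED, and the total number of bytes written to
 * 'buf' is returned.
 */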
static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1547
const struct ath10k_mem_region *mem_region,
1548
u8 *buf, size_t buf_len)
1549
{
1550
const struct ath10k_mem_section *cur_section, *next_section;
1551
unsigned int count, section_size, skip_size;
1552
int ret, i, j;
1553
1554
if (!mem_region || !buf)
1555
return 0;
1556
1557
cur_section = &mem_region->section_table.sections[0];
1558
1559
if (mem_region->start > cur_section->start) {
1560
ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1561
mem_region->start, cur_section->start);
1562
return 0;
1563
}
1564
1565
skip_size = cur_section->start - mem_region->start;
1566
1567
/* fill the gap between the first register section and register
1568
* start address
1569
*/
1570
for (i = 0; i < skip_size; i++) {
1571
*buf = ATH10K_MAGIC_NOT_COPIED;
1572
buf++;
1573
}
1574
1575
count = 0;
1576
1577
for (i = 0; cur_section != NULL; i++) {
1578
section_size = cur_section->end - cur_section->start;
1579
1580
if (section_size <= 0) {
1581
ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1582
cur_section->start,
1583
cur_section->end);
1584
break;
1585
}
1586
1587
if ((i + 1) == mem_region->section_table.size) {
1588
/* last section */
1589
next_section = NULL;
1590
skip_size = 0;
1591
} else {
1592
next_section = cur_section + 1;
1593
1594
if (cur_section->end > next_section->start) {
1595
ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1596
next_section->start,
1597
cur_section->end);
1598
break;
1599
}
1600
1601
skip_size = next_section->start - cur_section->end;
1602
}
1603
1604
if (buf_len < (skip_size + section_size)) {
1605
ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1606
break;
1607
}
1608
1609
buf_len -= skip_size + section_size;
1610
1611
/* read section to dest memory */
1612
ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1613
buf, section_size);
1614
if (ret) {
1615
ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1616
cur_section->start, ret);
1617
break;
1618
}
1619
1620
buf += section_size;
1621
count += section_size;
1622
1623
/* fill in the gap between this section and the next */
1624
for (j = 0; j < skip_size; j++) {
1625
*buf = ATH10K_MAGIC_NOT_COPIED;
1626
buf++;
1627
}
1628
1629
count += skip_size;
1630
1631
if (!next_section)
1632
/* this was the last section */
1633
break;
1634
1635
cur_section = next_section;
1636
}
1637
1638
return count;
1639
}
1640
1641
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1642
{
1643
u32 val;
1644
1645
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1646
FW_RAM_CONFIG_ADDRESS, config);
1647
1648
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1649
FW_RAM_CONFIG_ADDRESS);
1650
if (val != config) {
1651
ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1652
val, config);
1653
return -EIO;
1654
}
1655
1656
return 0;
1657
}
1658
1659
/* Always returns the length */
1660
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1661
const struct ath10k_mem_region *region,
1662
u8 *buf)
1663
{
1664
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1665
u32 base_addr, i;
1666
1667
#if defined(__linux__)
1668
base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1669
#elif defined(__FreeBSD__)
1670
base_addr = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_PCIE_BAR0_START_REG);
1671
#endif
1672
base_addr += region->start;
1673
1674
for (i = 0; i < region->len; i += 4) {
1675
#if defined(__linux__)
1676
iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1677
*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1678
#elif defined(__FreeBSD__)
1679
bus_write_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_ADDR_REG, base_addr + i);
1680
*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_DATA_REG);
1681
#endif
1682
}
1683
1684
return region->len;
1685
}
1686
1687
/* if an error happened returns < 0, otherwise the length */
1688
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1689
const struct ath10k_mem_region *region,
1690
u8 *buf)
1691
{
1692
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1693
u32 i;
1694
int ret;
1695
1696
mutex_lock(&ar->conf_mutex);
1697
if (ar->state != ATH10K_STATE_ON) {
1698
ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
1699
ret = -EIO;
1700
goto done;
1701
}
1702
1703
for (i = 0; i < region->len; i += 4)
1704
#if defined(__linux__)
1705
*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1706
#elif defined(__FreeBSD__)
1707
*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, region->start + i);
1708
#endif
1709
1710
ret = region->len;
1711
done:
1712
mutex_unlock(&ar->conf_mutex);
1713
return ret;
1714
}
1715
1716
/* if an error happened returns < 0, otherwise the length */
1717
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1718
const struct ath10k_mem_region *current_region,
1719
u8 *buf)
1720
{
1721
int ret;
1722
1723
if (current_region->section_table.size > 0)
1724
/* Copy each section individually. */
1725
return ath10k_pci_dump_memory_section(ar,
1726
current_region,
1727
buf,
1728
current_region->len);
1729
1730
/* No individual memory sections defined so we can
1731
* copy the entire memory region.
1732
*/
1733
ret = ath10k_pci_diag_read_mem(ar,
1734
current_region->start,
1735
buf,
1736
current_region->len);
1737
if (ret) {
1738
ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1739
current_region->name, ret);
1740
return ret;
1741
}
1742
1743
return current_region->len;
1744
}
1745
1746
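/*
 * Lay out the ramdump buffer as a sequence of ath10k_dump_ram_data_hdr
 * headers, each followed by the payload of one memory region from the
 * coredump layout; IRAM regions require switching the target RAM window
 * via ath10k_pci_set_ram_config() before they can be read.
 */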
static void ath10k_pci_dump_memory(struct ath10k *ar,
1747
struct ath10k_fw_crash_data *crash_data)
1748
{
1749
const struct ath10k_hw_mem_layout *mem_layout;
1750
const struct ath10k_mem_region *current_region;
1751
struct ath10k_dump_ram_data_hdr *hdr;
1752
u32 count, shift;
1753
size_t buf_len;
1754
int ret, i;
1755
u8 *buf;
1756
1757
lockdep_assert_held(&ar->dump_mutex);
1758
1759
if (!crash_data)
1760
return;
1761
1762
mem_layout = ath10k_coredump_get_mem_layout(ar);
1763
if (!mem_layout)
1764
return;
1765
1766
current_region = &mem_layout->region_table.regions[0];
1767
1768
buf = crash_data->ramdump_buf;
1769
buf_len = crash_data->ramdump_buf_len;
1770
1771
memset(buf, 0, buf_len);
1772
1773
for (i = 0; i < mem_layout->region_table.size; i++) {
1774
count = 0;
1775
1776
if (current_region->len > buf_len) {
1777
ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
1778
current_region->name,
1779
current_region->len,
1780
buf_len);
1781
break;
1782
}
1783
1784
/* To get IRAM dump, the host driver needs to switch target
1785
* ram config from DRAM to IRAM.
1786
*/
1787
if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1788
current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1789
shift = current_region->start >> 20;
1790
1791
ret = ath10k_pci_set_ram_config(ar, shift);
1792
if (ret) {
1793
ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1794
current_region->name, ret);
1795
break;
1796
}
1797
}
1798
1799
/* Reserve space for the header. */
1800
hdr = (void *)buf;
1801
buf += sizeof(*hdr);
1802
buf_len -= sizeof(*hdr);
1803
1804
switch (current_region->type) {
1805
case ATH10K_MEM_REGION_TYPE_IOSRAM:
1806
count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1807
break;
1808
case ATH10K_MEM_REGION_TYPE_IOREG:
1809
ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1810
if (ret < 0)
1811
break;
1812
1813
count = ret;
1814
break;
1815
default:
1816
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1817
if (ret < 0)
1818
break;
1819
1820
count = ret;
1821
break;
1822
}
1823
1824
hdr->region_type = cpu_to_le32(current_region->type);
1825
hdr->start = cpu_to_le32(current_region->start);
1826
hdr->length = cpu_to_le32(count);
1827
1828
if (count == 0)
1829
/* Note: the header remains, just with zero length. */
1830
break;
1831
1832
buf += count;
1833
buf_len -= count;
1834
1835
current_region++;
1836
}
1837
}
1838
1839
static void ath10k_pci_fw_dump_work(struct work_struct *work)
1840
{
1841
struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1842
dump_work);
1843
struct ath10k_fw_crash_data *crash_data;
1844
struct ath10k *ar = ar_pci->ar;
1845
char guid[UUID_STRING_LEN + 1];
1846
1847
mutex_lock(&ar->dump_mutex);
1848
1849
spin_lock_bh(&ar->data_lock);
1850
ar->stats.fw_crash_counter++;
1851
spin_unlock_bh(&ar->data_lock);
1852
1853
crash_data = ath10k_coredump_new(ar);
1854
1855
if (crash_data)
1856
scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1857
else
1858
scnprintf(guid, sizeof(guid), "n/a");
1859
1860
ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1861
ath10k_print_driver_info(ar);
1862
ath10k_pci_dump_registers(ar, crash_data);
1863
ath10k_ce_dump_registers(ar, crash_data);
1864
ath10k_pci_dump_memory(ar, crash_data);
1865
1866
mutex_unlock(&ar->dump_mutex);
1867
1868
ath10k_core_start_recovery(ar);
1869
}
1870
1871
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1872
{
1873
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1874
1875
queue_work(ar->workqueue, &ar_pci->dump_work);
1876
}
1877
1878
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1879
int force)
1880
{
1881
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1882
1883
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1884
1885
if (!force) {
1886
int resources;
1887
/*
1888
* Decide whether to actually poll for completions, or just
1889
* wait for a later chance.
1890
* If there seem to be plenty of resources left, then just wait
1891
* since checking involves reading a CE register, which is a
1892
* relatively expensive operation.
1893
*/
1894
resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1895
1896
/*
1897
* If at least 50% of the total resources are still available,
1898
* don't bother checking again yet.
1899
*/
1900
if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
1901
return;
1902
}
1903
ath10k_ce_per_engine_service(ar, pipe);
1904
}
1905
1906
static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1907
{
1908
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1909
1910
timer_delete_sync(&ar_pci->rx_post_retry);
1911
}
1912
1913
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1914
u8 *ul_pipe, u8 *dl_pipe)
1915
{
1916
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1917
const struct ce_service_to_pipe *entry;
1918
bool ul_set = false, dl_set = false;
1919
int i;
1920
1921
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1922
1923
for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
1924
entry = &ar_pci->serv_to_pipe[i];
1925
1926
if (__le32_to_cpu(entry->service_id) != service_id)
1927
continue;
1928
1929
switch (__le32_to_cpu(entry->pipedir)) {
1930
case PIPEDIR_NONE:
1931
break;
1932
case PIPEDIR_IN:
1933
WARN_ON(dl_set);
1934
*dl_pipe = __le32_to_cpu(entry->pipenum);
1935
dl_set = true;
1936
break;
1937
case PIPEDIR_OUT:
1938
WARN_ON(ul_set);
1939
*ul_pipe = __le32_to_cpu(entry->pipenum);
1940
ul_set = true;
1941
break;
1942
case PIPEDIR_INOUT:
1943
WARN_ON(dl_set);
1944
WARN_ON(ul_set);
1945
*dl_pipe = __le32_to_cpu(entry->pipenum);
1946
*ul_pipe = __le32_to_cpu(entry->pipenum);
1947
dl_set = true;
1948
ul_set = true;
1949
break;
1950
}
1951
}
1952
1953
if (!ul_set || !dl_set)
1954
return -ENOENT;
1955
1956
return 0;
1957
}
1958
1959
void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1960
u8 *ul_pipe, u8 *dl_pipe)
1961
{
1962
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1963
1964
(void)ath10k_pci_hif_map_service_to_pipe(ar,
1965
ATH10K_HTC_SVC_ID_RSVD_CTRL,
1966
ul_pipe, dl_pipe);
1967
}
1968
1969
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1970
{
1971
u32 val;
1972
1973
switch (ar->hw_rev) {
1974
case ATH10K_HW_QCA988X:
1975
case ATH10K_HW_QCA9887:
1976
case ATH10K_HW_QCA6174:
1977
case ATH10K_HW_QCA9377:
1978
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1979
CORE_CTRL_ADDRESS);
1980
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1981
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1982
CORE_CTRL_ADDRESS, val);
1983
break;
1984
case ATH10K_HW_QCA99X0:
1985
case ATH10K_HW_QCA9984:
1986
case ATH10K_HW_QCA9888:
1987
case ATH10K_HW_QCA4019:
1988
/* TODO: Find appropriate register configuration for QCA99X0
1989
* to mask irq/MSI.
1990
*/
1991
break;
1992
case ATH10K_HW_WCN3990:
1993
break;
1994
}
1995
}
1996
1997
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1998
{
1999
u32 val;
2000
2001
switch (ar->hw_rev) {
2002
case ATH10K_HW_QCA988X:
2003
case ATH10K_HW_QCA9887:
2004
case ATH10K_HW_QCA6174:
2005
case ATH10K_HW_QCA9377:
2006
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2007
CORE_CTRL_ADDRESS);
2008
val |= CORE_CTRL_PCIE_REG_31_MASK;
2009
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2010
CORE_CTRL_ADDRESS, val);
2011
break;
2012
case ATH10K_HW_QCA99X0:
2013
case ATH10K_HW_QCA9984:
2014
case ATH10K_HW_QCA9888:
2015
case ATH10K_HW_QCA4019:
2016
/* TODO: Find appropriate register configuration for QCA99X0
2017
* to unmask irq/MSI.
2018
*/
2019
break;
2020
case ATH10K_HW_WCN3990:
2021
break;
2022
}
2023
}
2024
2025
static void ath10k_pci_irq_disable(struct ath10k *ar)
2026
{
2027
ath10k_ce_disable_interrupts(ar);
2028
ath10k_pci_disable_and_clear_intx_irq(ar);
2029
ath10k_pci_irq_msi_fw_mask(ar);
2030
}
2031
2032
static void ath10k_pci_irq_sync(struct ath10k *ar)
2033
{
2034
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2035
2036
synchronize_irq(ar_pci->pdev->irq);
2037
}
2038
2039
static void ath10k_pci_irq_enable(struct ath10k *ar)
2040
{
2041
ath10k_ce_enable_interrupts(ar);
2042
ath10k_pci_enable_intx_irq(ar);
2043
ath10k_pci_irq_msi_fw_unmask(ar);
2044
}
2045
2046
static int ath10k_pci_hif_start(struct ath10k *ar)
2047
{
2048
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2049
2050
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
2051
2052
ath10k_core_napi_enable(ar);
2053
2054
ath10k_pci_irq_enable(ar);
2055
ath10k_pci_rx_post(ar);
2056
2057
pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2058
PCI_EXP_LNKCTL_ASPMC,
2059
ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
2060
2061
return 0;
2062
}
2063
2064
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2065
{
2066
struct ath10k *ar;
2067
struct ath10k_ce_pipe *ce_pipe;
2068
struct ath10k_ce_ring *ce_ring;
2069
struct sk_buff *skb;
2070
int i;
2071
2072
ar = pci_pipe->hif_ce_state;
2073
ce_pipe = pci_pipe->ce_hdl;
2074
ce_ring = ce_pipe->dest_ring;
2075
2076
if (!ce_ring)
2077
return;
2078
2079
if (!pci_pipe->buf_sz)
2080
return;
2081
2082
for (i = 0; i < ce_ring->nentries; i++) {
2083
skb = ce_ring->per_transfer_context[i];
2084
if (!skb)
2085
continue;
2086
2087
ce_ring->per_transfer_context[i] = NULL;
2088
2089
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
2090
skb->len + skb_tailroom(skb),
2091
DMA_FROM_DEVICE);
2092
dev_kfree_skb_any(skb);
2093
}
2094
}
2095
2096
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2097
{
2098
struct ath10k *ar;
2099
struct ath10k_ce_pipe *ce_pipe;
2100
struct ath10k_ce_ring *ce_ring;
2101
struct sk_buff *skb;
2102
int i;
2103
2104
ar = pci_pipe->hif_ce_state;
2105
ce_pipe = pci_pipe->ce_hdl;
2106
ce_ring = ce_pipe->src_ring;
2107
2108
if (!ce_ring)
2109
return;
2110
2111
if (!pci_pipe->buf_sz)
2112
return;
2113
2114
for (i = 0; i < ce_ring->nentries; i++) {
2115
skb = ce_ring->per_transfer_context[i];
2116
if (!skb)
2117
continue;
2118
2119
ce_ring->per_transfer_context[i] = NULL;
2120
2121
ath10k_htc_tx_completion_handler(ar, skb);
2122
}
2123
}
2124
2125
/*
2126
* Cleanup residual buffers for device shutdown:
2127
* buffers that were enqueued for receive
2128
* buffers that were to be sent
2129
* Note: Buffers that had completed but which were
2130
* not yet processed are on a completion queue. They
2131
* are handled when the completion thread shuts down.
2132
*/
2133
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2134
{
2135
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2136
int pipe_num;
2137
2138
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2139
struct ath10k_pci_pipe *pipe_info;
2140
2141
pipe_info = &ar_pci->pipe_info[pipe_num];
2142
ath10k_pci_rx_pipe_cleanup(pipe_info);
2143
ath10k_pci_tx_pipe_cleanup(pipe_info);
2144
}
2145
}
2146
2147
void ath10k_pci_ce_deinit(struct ath10k *ar)
2148
{
2149
int i;
2150
2151
for (i = 0; i < CE_COUNT; i++)
2152
ath10k_ce_deinit_pipe(ar, i);
2153
}
2154
2155
void ath10k_pci_flush(struct ath10k *ar)
2156
{
2157
ath10k_pci_rx_retry_sync(ar);
2158
ath10k_pci_buffer_cleanup(ar);
2159
}
2160
2161
static void ath10k_pci_hif_stop(struct ath10k *ar)
2162
{
2163
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2164
unsigned long flags;
2165
2166
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
2167
2168
ath10k_pci_irq_disable(ar);
2169
ath10k_pci_irq_sync(ar);
2170
2171
ath10k_core_napi_sync_disable(ar);
2172
2173
cancel_work_sync(&ar_pci->dump_work);
2174
2175
/* Most likely the device has HTT Rx ring configured. The only way to
2176
* prevent the device from accessing (and possible corrupting) host
2177
* memory is to reset the chip now.
2178
*
2179
* There's also no known way of masking MSI interrupts on the device.
2180
* For ranged MSI the CE-related interrupts can be masked. However
2181
* regardless how many MSI interrupts are assigned the first one
2182
* is always used for firmware indications (crashes) and cannot be
2183
* masked. To prevent the device from asserting the interrupt reset it
2184
* before proceeding with cleanup.
2185
*/
2186
ath10k_pci_safe_chip_reset(ar);
2187
2188
ath10k_pci_flush(ar);
2189
2190
spin_lock_irqsave(&ar_pci->ps_lock, flags);
2191
WARN_ON(ar_pci->ps_wake_refcount > 0);
2192
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2193
}
2194
2195
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2196
void *req, u32 req_len,
2197
void *resp, u32 *resp_len)
2198
{
2199
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2200
struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2201
struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2202
struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2203
struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2204
dma_addr_t req_paddr = 0;
2205
dma_addr_t resp_paddr = 0;
2206
struct bmi_xfer xfer = {};
2207
void *treq, *tresp = NULL;
2208
int ret = 0;
2209
2210
might_sleep();
2211
2212
if (resp && !resp_len)
2213
return -EINVAL;
2214
2215
if (resp && resp_len && *resp_len == 0)
2216
return -EINVAL;
2217
2218
treq = kmemdup(req, req_len, GFP_KERNEL);
2219
if (!treq)
2220
return -ENOMEM;
2221
2222
req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2223
ret = dma_mapping_error(ar->dev, req_paddr);
2224
if (ret) {
2225
ret = -EIO;
2226
goto err_dma;
2227
}
2228
2229
if (resp && resp_len) {
2230
tresp = kzalloc(*resp_len, GFP_KERNEL);
2231
if (!tresp) {
2232
ret = -ENOMEM;
2233
goto err_req;
2234
}
2235
2236
resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2237
DMA_FROM_DEVICE);
2238
ret = dma_mapping_error(ar->dev, resp_paddr);
2239
if (ret) {
2240
ret = -EIO;
2241
goto err_req;
2242
}
2243
2244
xfer.wait_for_resp = true;
2245
xfer.resp_len = 0;
2246
2247
ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2248
}
2249
2250
ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2251
if (ret)
2252
goto err_resp;
2253
2254
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2255
if (ret) {
2256
dma_addr_t unused_buffer;
2257
unsigned int unused_nbytes;
2258
unsigned int unused_id;
2259
2260
ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2261
&unused_nbytes, &unused_id);
2262
} else {
2263
/* non-zero means we did not time out */
2264
ret = 0;
2265
}
2266
2267
err_resp:
2268
if (resp) {
2269
dma_addr_t unused_buffer;
2270
2271
ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2272
dma_unmap_single(ar->dev, resp_paddr,
2273
*resp_len, DMA_FROM_DEVICE);
2274
}
2275
err_req:
2276
dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2277
2278
if (ret == 0 && resp_len) {
2279
*resp_len = min(*resp_len, xfer.resp_len);
2280
memcpy(resp, tresp, *resp_len);
2281
}
2282
err_dma:
2283
kfree(treq);
2284
kfree(tresp);
2285
2286
return ret;
2287
}
2288
2289
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2290
{
2291
struct bmi_xfer *xfer;
2292
2293
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2294
return;
2295
2296
xfer->tx_done = true;
2297
}
2298
2299
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2300
{
2301
struct ath10k *ar = ce_state->ar;
2302
struct bmi_xfer *xfer;
2303
unsigned int nbytes;
2304
2305
if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2306
&nbytes))
2307
return;
2308
2309
if (WARN_ON_ONCE(!xfer))
2310
return;
2311
2312
if (!xfer->wait_for_resp) {
2313
ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2314
return;
2315
}
2316
2317
xfer->resp_len = nbytes;
2318
xfer->rx_done = true;
2319
}
2320
2321
static int ath10k_pci_bmi_wait(struct ath10k *ar,
2322
struct ath10k_ce_pipe *tx_pipe,
2323
struct ath10k_ce_pipe *rx_pipe,
2324
struct bmi_xfer *xfer)
2325
{
2326
unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2327
unsigned long started = jiffies;
2328
unsigned long dur;
2329
int ret;
2330
2331
while (time_before_eq(jiffies, timeout)) {
2332
ath10k_pci_bmi_send_done(tx_pipe);
2333
ath10k_pci_bmi_recv_data(rx_pipe);
2334
2335
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2336
ret = 0;
2337
goto out;
2338
}
2339
2340
#if defined(__linux__)
2341
schedule();
2342
#elif defined(__FreeBSD__)
2343
/*
2344
* Using LinuxKPI's schedule() will hang for-ever as there is
2345
* no wake_up. Poll about 100 times per second until timeout.
2346
*/
2347
schedule_timeout(BMI_COMMUNICATION_TIMEOUT_HZ/300);
2348
#endif
2349
}
2350
2351
ret = -ETIMEDOUT;
2352
2353
out:
2354
dur = jiffies - started;
2355
if (dur > HZ)
2356
ath10k_dbg(ar, ATH10K_DBG_BMI,
2357
"bmi cmd took %lu jiffies hz %d ret %d\n",
2358
dur, HZ, ret);
2359
return ret;
2360
}
2361
2362
/*
2363
* Send an interrupt to the device to wake up the Target CPU
2364
* so it has an opportunity to notice any changed state.
2365
*/
2366
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2367
{
2368
u32 addr, val;
2369
2370
addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2371
val = ath10k_pci_read32(ar, addr);
2372
val |= CORE_CTRL_CPU_INTR_MASK;
2373
ath10k_pci_write32(ar, addr, val);
2374
2375
return 0;
2376
}
2377
2378
static int ath10k_pci_get_num_banks(struct ath10k *ar)
2379
{
2380
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2381
2382
switch (ar_pci->pdev->device) {
2383
case QCA988X_2_0_DEVICE_ID_UBNT:
2384
case QCA988X_2_0_DEVICE_ID:
2385
case QCA99X0_2_0_DEVICE_ID:
2386
case QCA9888_2_0_DEVICE_ID:
2387
case QCA9984_1_0_DEVICE_ID:
2388
case QCA9887_1_0_DEVICE_ID:
2389
return 1;
2390
case QCA6164_2_1_DEVICE_ID:
2391
case QCA6174_2_1_DEVICE_ID:
2392
switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2393
case QCA6174_HW_1_0_CHIP_ID_REV:
2394
case QCA6174_HW_1_1_CHIP_ID_REV:
2395
case QCA6174_HW_2_1_CHIP_ID_REV:
2396
case QCA6174_HW_2_2_CHIP_ID_REV:
2397
return 3;
2398
case QCA6174_HW_1_3_CHIP_ID_REV:
2399
return 2;
2400
case QCA6174_HW_3_0_CHIP_ID_REV:
2401
case QCA6174_HW_3_1_CHIP_ID_REV:
2402
case QCA6174_HW_3_2_CHIP_ID_REV:
2403
return 9;
2404
}
2405
break;
2406
case QCA9377_1_0_DEVICE_ID:
2407
return 9;
2408
}
2409
2410
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2411
return 1;
2412
}
2413
2414
static int ath10k_bus_get_num_banks(struct ath10k *ar)
2415
{
2416
struct ath10k_ce *ce = ath10k_ce_priv(ar);
2417
2418
return ce->bus_ops->get_num_banks(ar);
2419
}
2420
2421
int ath10k_pci_init_config(struct ath10k *ar)
2422
{
2423
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2424
u32 interconnect_targ_addr;
2425
u32 pcie_state_targ_addr = 0;
2426
u32 pipe_cfg_targ_addr = 0;
2427
u32 svc_to_pipe_map = 0;
2428
u32 pcie_config_flags = 0;
2429
u32 ealloc_value;
2430
u32 ealloc_targ_addr;
2431
u32 flag2_value;
2432
u32 flag2_targ_addr;
2433
int ret = 0;
2434
2435
/* Download to Target the CE Config and the service-to-CE map */
2436
interconnect_targ_addr =
2437
host_interest_item_address(HI_ITEM(hi_interconnect_state));
2438
2439
/* Supply Target-side CE configuration */
2440
ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2441
&pcie_state_targ_addr);
2442
if (ret != 0) {
2443
ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2444
return ret;
2445
}
2446
2447
if (pcie_state_targ_addr == 0) {
2448
ret = -EIO;
2449
ath10k_err(ar, "Invalid pcie state addr\n");
2450
return ret;
2451
}
2452
2453
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2454
offsetof(struct pcie_state,
2455
pipe_cfg_addr)),
2456
&pipe_cfg_targ_addr);
2457
if (ret != 0) {
2458
ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2459
return ret;
2460
}
2461
2462
if (pipe_cfg_targ_addr == 0) {
2463
ret = -EIO;
2464
ath10k_err(ar, "Invalid pipe cfg addr\n");
2465
return ret;
2466
}
2467
2468
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2469
ar_pci->pipe_config,
2470
sizeof(struct ce_pipe_config) *
2471
NUM_TARGET_CE_CONFIG_WLAN);
2472
2473
if (ret != 0) {
2474
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2475
return ret;
2476
}
2477
2478
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2479
offsetof(struct pcie_state,
2480
svc_to_pipe_map)),
2481
&svc_to_pipe_map);
2482
if (ret != 0) {
2483
ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2484
return ret;
2485
}
2486
2487
if (svc_to_pipe_map == 0) {
2488
ret = -EIO;
2489
ath10k_err(ar, "Invalid svc_to_pipe map\n");
2490
return ret;
2491
}
2492
2493
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2494
ar_pci->serv_to_pipe,
2495
sizeof(pci_target_service_to_ce_map_wlan));
2496
if (ret != 0) {
2497
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2498
return ret;
2499
}
2500
2501
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2502
offsetof(struct pcie_state,
2503
config_flags)),
2504
&pcie_config_flags);
2505
if (ret != 0) {
2506
ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2507
return ret;
2508
}
2509
2510
pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2511
2512
ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2513
offsetof(struct pcie_state,
2514
config_flags)),
2515
pcie_config_flags);
2516
if (ret != 0) {
2517
ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2518
return ret;
2519
}
2520
2521
/* configure early allocation */
2522
ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2523
2524
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2525
if (ret != 0) {
2526
ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2527
return ret;
2528
}
2529
2530
/* first bank is switched to IRAM */
2531
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2532
HI_EARLY_ALLOC_MAGIC_MASK);
2533
ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2534
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2535
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2536
2537
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2538
if (ret != 0) {
2539
ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2540
return ret;
2541
}
2542
2543
/* Tell Target to proceed with initialization */
2544
flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2545
2546
ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2547
if (ret != 0) {
2548
ath10k_err(ar, "Failed to get option val: %d\n", ret);
2549
return ret;
2550
}
2551
2552
flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2553
2554
ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2555
if (ret != 0) {
2556
ath10k_err(ar, "Failed to set option val: %d\n", ret);
2557
return ret;
2558
}
2559
2560
return 0;
2561
}
2562
2563
static void ath10k_pci_override_ce_config(struct ath10k *ar)
2564
{
2565
struct ce_attr *attr;
2566
struct ce_pipe_config *config;
2567
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2568
2569
/* For QCA6174 we're overriding the Copy Engine 5 configuration,
2570
* since it is currently used for other feature.
2571
*/
2572
2573
/* Override Host's Copy Engine 5 configuration */
2574
attr = &ar_pci->attr[5];
2575
attr->src_sz_max = 0;
2576
attr->dest_nentries = 0;
2577
2578
/* Override Target firmware's Copy Engine configuration */
2579
config = &ar_pci->pipe_config[5];
2580
config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2581
config->nbytes_max = __cpu_to_le32(2048);
2582
2583
/* Map from service/endpoint to Copy Engine */
2584
ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
2585
}
2586
2587
int ath10k_pci_alloc_pipes(struct ath10k *ar)
2588
{
2589
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2590
struct ath10k_pci_pipe *pipe;
2591
struct ath10k_ce *ce = ath10k_ce_priv(ar);
2592
int i, ret;
2593
2594
for (i = 0; i < CE_COUNT; i++) {
2595
pipe = &ar_pci->pipe_info[i];
2596
pipe->ce_hdl = &ce->ce_states[i];
2597
pipe->pipe_num = i;
2598
pipe->hif_ce_state = ar;
2599
2600
ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
2601
if (ret) {
2602
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2603
i, ret);
2604
return ret;
2605
}
2606
2607
/* Last CE is Diagnostic Window */
2608
if (i == CE_DIAG_PIPE) {
2609
ar_pci->ce_diag = pipe->ce_hdl;
2610
continue;
2611
}
2612
2613
pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
2614
}
2615
2616
return 0;
2617
}
2618
2619
void ath10k_pci_free_pipes(struct ath10k *ar)
2620
{
2621
int i;
2622
2623
for (i = 0; i < CE_COUNT; i++)
2624
ath10k_ce_free_pipe(ar, i);
2625
}
2626
2627
int ath10k_pci_init_pipes(struct ath10k *ar)
2628
{
2629
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2630
int i, ret;
2631
2632
for (i = 0; i < CE_COUNT; i++) {
2633
ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
2634
if (ret) {
2635
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2636
i, ret);
2637
return ret;
2638
}
2639
}
2640
2641
return 0;
2642
}
2643
2644
static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2645
{
2646
return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2647
FW_IND_EVENT_PENDING;
2648
}
2649
2650
static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2651
{
2652
u32 val;
2653
2654
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2655
val &= ~FW_IND_EVENT_PENDING;
2656
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2657
}
2658
2659
static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2660
{
2661
u32 val;
2662
2663
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2664
return (val == 0xffffffff);
2665
}
2666
2667
/* this function effectively clears target memory controller assert line */
2668
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2669
{
2670
u32 val;
2671
2672
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2673
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2674
val | SOC_RESET_CONTROL_SI0_RST_MASK);
2675
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2676
2677
msleep(10);
2678
2679
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2680
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2681
val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2682
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2683
2684
msleep(10);
2685
}
2686
2687
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2688
{
2689
u32 val;
2690
2691
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2692
2693
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2694
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2695
val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2696
}
2697
2698
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2699
{
2700
u32 val;
2701
2702
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2703
2704
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2705
val | SOC_RESET_CONTROL_CE_RST_MASK);
2706
msleep(10);
2707
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2708
val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2709
}
2710
2711
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2712
{
2713
u32 val;
2714
2715
val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
2716
ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
2717
val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2718
}
2719
2720
static int ath10k_pci_warm_reset(struct ath10k *ar)
2721
{
2722
int ret;
2723
2724
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2725
2726
spin_lock_bh(&ar->data_lock);
2727
ar->stats.fw_warm_reset_counter++;
2728
spin_unlock_bh(&ar->data_lock);
2729
2730
ath10k_pci_irq_disable(ar);
2731
2732
/* Make sure the target CPU is not doing anything dangerous, e.g. if it
2733
* were to access copy engine while host performs copy engine reset
2734
* then it is possible for the device to confuse pci-e controller to
2735
* the point of bringing host system to a complete stop (i.e. hang).
2736
*/
2737
ath10k_pci_warm_reset_si0(ar);
2738
ath10k_pci_warm_reset_cpu(ar);
2739
ath10k_pci_init_pipes(ar);
2740
ath10k_pci_wait_for_target_init(ar);
2741
2742
ath10k_pci_warm_reset_clear_lf(ar);
2743
ath10k_pci_warm_reset_ce(ar);
2744
ath10k_pci_warm_reset_cpu(ar);
2745
ath10k_pci_init_pipes(ar);
2746
2747
ret = ath10k_pci_wait_for_target_init(ar);
2748
if (ret) {
2749
ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2750
return ret;
2751
}
2752
2753
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2754
2755
return 0;
2756
}
2757
2758
static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2759
{
2760
ath10k_pci_irq_disable(ar);
2761
return ath10k_pci_qca99x0_chip_reset(ar);
2762
}
2763
2764
static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2765
{
2766
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2767
2768
if (!ar_pci->pci_soft_reset)
2769
return -EOPNOTSUPP;
2770
2771
return ar_pci->pci_soft_reset(ar);
2772
}
2773
2774
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2775
{
2776
int i, ret;
2777
u32 val;
2778
2779
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2780
2781
/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
2782
* It is thus preferred to use warm reset which is safer but may not be
2783
* able to recover the device from all possible fail scenarios.
2784
*
2785
* Warm reset doesn't always work on first try so attempt it a few
2786
* times before giving up.
2787
*/
2788
for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2789
ret = ath10k_pci_warm_reset(ar);
2790
if (ret) {
2791
ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2792
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2793
ret);
2794
continue;
2795
}
2796
2797
/* FIXME: Sometimes copy engine doesn't recover after warm
2798
* reset. In most cases this needs cold reset. In some of these
2799
* cases the device is in such a state that a cold reset may
2800
* lock up the host.
2801
*
2802
* Reading any host interest register via copy engine is
2803
* sufficient to verify if device is capable of booting
2804
* firmware blob.
2805
*/
2806
ret = ath10k_pci_init_pipes(ar);
2807
if (ret) {
2808
ath10k_warn(ar, "failed to init copy engine: %d\n",
2809
ret);
2810
continue;
2811
}
2812
2813
ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2814
&val);
2815
if (ret) {
2816
ath10k_warn(ar, "failed to poke copy engine: %d\n",
2817
ret);
2818
continue;
2819
}
2820
2821
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2822
return 0;
2823
}
2824
2825
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2826
ath10k_warn(ar, "refusing cold reset as requested\n");
2827
return -EPERM;
2828
}
2829
2830
ret = ath10k_pci_cold_reset(ar);
2831
if (ret) {
2832
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2833
return ret;
2834
}
2835
2836
ret = ath10k_pci_wait_for_target_init(ar);
2837
if (ret) {
2838
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2839
ret);
2840
return ret;
2841
}
2842
2843
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2844
2845
return 0;
2846
}
2847
2848
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2849
{
2850
int ret;
2851
2852
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2853
2854
/* FIXME: QCA6174 requires cold + warm reset to work. */
2855
2856
ret = ath10k_pci_cold_reset(ar);
2857
if (ret) {
2858
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2859
return ret;
2860
}
2861
2862
ret = ath10k_pci_wait_for_target_init(ar);
2863
if (ret) {
2864
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2865
ret);
2866
return ret;
2867
}
2868
2869
ret = ath10k_pci_warm_reset(ar);
2870
if (ret) {
2871
ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2872
return ret;
2873
}
2874
2875
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2876
2877
return 0;
2878
}
2879
2880
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2881
{
2882
int ret;
2883
2884
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2885
2886
ret = ath10k_pci_cold_reset(ar);
2887
if (ret) {
2888
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2889
return ret;
2890
}
2891
2892
ret = ath10k_pci_wait_for_target_init(ar);
2893
if (ret) {
2894
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2895
ret);
2896
return ret;
2897
}
2898
2899
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2900
2901
return 0;
2902
}
2903
2904
static int ath10k_pci_chip_reset(struct ath10k *ar)
2905
{
2906
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2907
2908
if (WARN_ON(!ar_pci->pci_hard_reset))
2909
return -EOPNOTSUPP;
2910
2911
return ar_pci->pci_hard_reset(ar);
2912
}
2913
2914
static int ath10k_pci_hif_power_up(struct ath10k *ar,
2915
enum ath10k_firmware_mode fw_mode)
2916
{
2917
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2918
int ret;
2919
2920
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2921
2922
pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2923
&ar_pci->link_ctl);
2924
pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2925
PCI_EXP_LNKCTL_ASPMC);
2926
2927
/*
2928
* Bring the target up cleanly.
2929
*
2930
* The target may be in an undefined state with an AUX-powered Target
2931
* and a Host in WoW mode. If the Host crashes, loses power, or is
2932
* restarted (without unloading the driver) then the Target is left
2933
* (aux) powered and running. On a subsequent driver load, the Target
2934
* is in an unexpected state. We try to catch that here in order to
2935
* reset the Target and retry the probe.
2936
*/
2937
ret = ath10k_pci_chip_reset(ar);
2938
if (ret) {
2939
if (ath10k_pci_has_fw_crashed(ar)) {
2940
ath10k_warn(ar, "firmware crashed during chip reset\n");
2941
ath10k_pci_fw_crashed_clear(ar);
2942
ath10k_pci_fw_crashed_dump(ar);
2943
}
2944
2945
ath10k_err(ar, "failed to reset chip: %d\n", ret);
2946
goto err_sleep;
2947
}
2948
2949
ret = ath10k_pci_init_pipes(ar);
2950
if (ret) {
2951
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2952
goto err_sleep;
2953
}
2954
2955
ret = ath10k_pci_init_config(ar);
2956
if (ret) {
2957
ath10k_err(ar, "failed to setup init config: %d\n", ret);
2958
goto err_ce;
2959
}
2960
2961
ret = ath10k_pci_wake_target_cpu(ar);
2962
if (ret) {
2963
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2964
goto err_ce;
2965
}
2966
2967
return 0;
2968
2969
err_ce:
2970
ath10k_pci_ce_deinit(ar);
2971
2972
err_sleep:
2973
return ret;
2974
}
2975
2976
void ath10k_pci_hif_power_down(struct ath10k *ar)
2977
{
2978
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2979
2980
/* Currently hif_power_up performs effectively a reset and hif_stop
2981
* resets the chip as well so there's no point in resetting here.
2982
*/
2983
}
2984
2985
static int ath10k_pci_hif_suspend(struct ath10k *ar)
2986
{
2987
/* Nothing to do; the important stuff is in the driver suspend. */
2988
return 0;
2989
}
2990
2991
#ifdef CONFIG_PM
2992
static int ath10k_pci_suspend(struct ath10k *ar)
2993
{
2994
/* The grace timer can still be counting down and ar->ps_awake be true.
2995
* It is known that the device may be asleep after resuming regardless
2996
* of the SoC powersave state before suspending. Hence make sure the
2997
* device is asleep before proceeding.
2998
*/
2999
ath10k_pci_sleep_sync(ar);
3000
3001
return 0;
3002
}
3003
#endif
3004
3005
static int ath10k_pci_hif_resume(struct ath10k *ar)
3006
{
3007
/* Nothing to do; the important stuff is in the driver resume. */
3008
return 0;
3009
}
3010
3011
#ifdef CONFIG_PM
3012
static int ath10k_pci_resume(struct ath10k *ar)
3013
{
3014
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3015
struct pci_dev *pdev = ar_pci->pdev;
3016
u32 val;
3017
int ret = 0;
3018
3019
ret = ath10k_pci_force_wake(ar);
3020
if (ret) {
3021
ath10k_err(ar, "failed to wake up target: %d\n", ret);
3022
return ret;
3023
}
3024
3025
/* Suspend/Resume resets the PCI configuration space, so we have to
3026
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
3027
* from interfering with C3 CPU state. pci_restore_state won't help
3028
* here since it only restores the first 64 bytes pci config header.
3029
*/
3030
pci_read_config_dword(pdev, 0x40, &val);
3031
if ((val & 0x0000ff00) != 0)
3032
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
3033
3034
return ret;
3035
}
3036
#endif
3037
3038
static bool ath10k_pci_validate_cal(void *data, size_t size)
3039
{
3040
__le16 *cal_words = data;
3041
u16 checksum = 0;
3042
size_t i;
3043
3044
if (size % 2 != 0)
3045
return false;
3046
3047
for (i = 0; i < size / 2; i++)
3048
checksum ^= le16_to_cpu(cal_words[i]);
3049
3050
return checksum == 0xffff;
3051
}
3052
3053
static void ath10k_pci_enable_eeprom(struct ath10k *ar)
3054
{
3055
/* Enable SI clock */
3056
ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
3057
3058
/* Configure GPIOs for I2C operation */
3059
ath10k_pci_write32(ar,
3060
GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
3061
4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
3062
SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
3063
GPIO_PIN0_CONFIG) |
3064
SM(1, GPIO_PIN0_PAD_PULL));
3065
3066
ath10k_pci_write32(ar,
3067
GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
3068
4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
3069
SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
3070
SM(1, GPIO_PIN0_PAD_PULL));
3071
3072
ath10k_pci_write32(ar,
3073
GPIO_BASE_ADDRESS +
3074
QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
3075
1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
3076
3077
/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
3078
ath10k_pci_write32(ar,
3079
SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
3080
SM(1, SI_CONFIG_ERR_INT) |
3081
SM(1, SI_CONFIG_BIDIR_OD_DATA) |
3082
SM(1, SI_CONFIG_I2C) |
3083
SM(1, SI_CONFIG_POS_SAMPLE) |
3084
SM(1, SI_CONFIG_INACTIVE_DATA) |
3085
SM(1, SI_CONFIG_INACTIVE_CLK) |
3086
SM(8, SI_CONFIG_DIVIDER));
3087
}
3088
3089
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
3090
{
3091
u32 reg;
3092
int wait_limit;
3093
3094
/* set device select byte and for the read operation */
3095
reg = QCA9887_EEPROM_SELECT_READ |
3096
SM(addr, QCA9887_EEPROM_ADDR_LO) |
3097
SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
3098
ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
3099
3100
/* write transmit data, transfer length, and START bit */
3101
ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
3102
SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
3103
SM(4, SI_CS_TX_CNT));
3104
3105
/* wait max 1 sec */
3106
wait_limit = 100000;
3107
3108
/* wait for SI_CS_DONE_INT */
3109
do {
3110
reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
3111
if (MS(reg, SI_CS_DONE_INT))
3112
break;
3113
3114
wait_limit--;
3115
udelay(10);
3116
} while (wait_limit > 0);
3117
3118
if (!MS(reg, SI_CS_DONE_INT)) {
3119
ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
3120
addr);
3121
return -ETIMEDOUT;
3122
}
3123
3124
/* clear SI_CS_DONE_INT */
3125
ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3126
3127
if (MS(reg, SI_CS_DONE_ERR)) {
3128
ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3129
return -EIO;
3130
}
3131
3132
/* extract receive data */
3133
reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3134
*out = reg;
3135
3136
return 0;
3137
}
3138
3139
static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3140
size_t *data_len)
3141
{
3142
u8 *caldata = NULL;
3143
size_t calsize, i;
3144
int ret;
3145
3146
if (!QCA_REV_9887(ar))
3147
return -EOPNOTSUPP;
3148
3149
calsize = ar->hw_params.cal_data_len;
3150
caldata = kmalloc(calsize, GFP_KERNEL);
3151
if (!caldata)
3152
return -ENOMEM;
3153
3154
ath10k_pci_enable_eeprom(ar);
3155
3156
for (i = 0; i < calsize; i++) {
3157
ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3158
if (ret)
3159
goto err_free;
3160
}
3161
3162
if (!ath10k_pci_validate_cal(caldata, calsize))
3163
goto err_free;
3164
3165
*data = caldata;
3166
*data_len = calsize;
3167
3168
return 0;
3169
3170
err_free:
3171
kfree(caldata);
3172
3173
return -EINVAL;
3174
}
3175
3176
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3177
.tx_sg = ath10k_pci_hif_tx_sg,
3178
.diag_read = ath10k_pci_hif_diag_read,
3179
.diag_write = ath10k_pci_diag_write_mem,
3180
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
3181
.start = ath10k_pci_hif_start,
3182
.stop = ath10k_pci_hif_stop,
3183
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
3184
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
3185
.send_complete_check = ath10k_pci_hif_send_complete_check,
3186
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
3187
.power_up = ath10k_pci_hif_power_up,
3188
.power_down = ath10k_pci_hif_power_down,
3189
.read32 = ath10k_pci_read32,
3190
.write32 = ath10k_pci_write32,
3191
.suspend = ath10k_pci_hif_suspend,
3192
.resume = ath10k_pci_hif_resume,
3193
.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
3194
};
3195
3196
/*
3197
* Top-level interrupt handler for all PCI interrupts from a Target.
3198
* When a block of MSI interrupts is allocated, this top-level handler
3199
* is not used; instead, we directly call the correct sub-handler.
3200
*/
3201
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3202
{
3203
struct ath10k *ar = arg;
3204
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3205
int ret;
3206
3207
if (ath10k_pci_has_device_gone(ar))
3208
return IRQ_NONE;
3209
3210
ret = ath10k_pci_force_wake(ar);
3211
if (ret) {
3212
ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3213
return IRQ_NONE;
3214
}
3215
3216
if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) &&
3217
!ath10k_pci_irq_pending(ar))
3218
return IRQ_NONE;
3219
3220
ath10k_pci_disable_and_clear_intx_irq(ar);
3221
ath10k_pci_irq_msi_fw_mask(ar);
3222
napi_schedule(&ar->napi);
3223
3224
return IRQ_HANDLED;
3225
}
3226
3227
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3228
{
3229
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3230
int done = 0;
3231
3232
if (ath10k_pci_has_fw_crashed(ar)) {
3233
ath10k_pci_fw_crashed_clear(ar);
3234
ath10k_pci_fw_crashed_dump(ar);
3235
napi_complete(ctx);
3236
return done;
3237
}
3238
3239
ath10k_ce_per_engine_service_any(ar);
3240
3241
done = ath10k_htt_txrx_compl_task(ar, budget);
3242
3243
if (done < budget) {
3244
napi_complete_done(ctx, done);
3245
/* In case of MSI, it is possible that interrupts are received
3246
* while NAPI poll is inprogress. So pending interrupts that are
3247
* received after processing all copy engine pipes by NAPI poll
3248
* will not be handled again. This is causing failure to
3249
* complete boot sequence in x86 platform. So before enabling
3250
* interrupts safer to check for pending interrupts for
3251
* immediate servicing.
3252
*/
3253
if (ath10k_ce_interrupt_summary(ar)) {
3254
napi_schedule(ctx);
3255
goto out;
3256
}
3257
ath10k_pci_enable_intx_irq(ar);
3258
ath10k_pci_irq_msi_fw_unmask(ar);
3259
}
3260
3261
out:
3262
return done;
3263
}
3264
3265
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3266
{
3267
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3268
int ret;
3269
3270
ret = request_irq(ar_pci->pdev->irq,
3271
ath10k_pci_interrupt_handler,
3272
IRQF_SHARED, "ath10k_pci", ar);
3273
if (ret) {
3274
ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3275
ar_pci->pdev->irq, ret);
3276
return ret;
3277
}
3278
3279
return 0;
3280
}
3281
3282
static int ath10k_pci_request_irq_intx(struct ath10k *ar)
3283
{
3284
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3285
int ret;
3286
3287
ret = request_irq(ar_pci->pdev->irq,
3288
ath10k_pci_interrupt_handler,
3289
IRQF_SHARED, "ath10k_pci", ar);
3290
if (ret) {
3291
ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3292
ar_pci->pdev->irq, ret);
3293
return ret;
3294
}
3295
3296
return 0;
3297
}
3298
3299
static int ath10k_pci_request_irq(struct ath10k *ar)
3300
{
3301
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3302
3303
switch (ar_pci->oper_irq_mode) {
3304
case ATH10K_PCI_IRQ_INTX:
3305
return ath10k_pci_request_irq_intx(ar);
3306
case ATH10K_PCI_IRQ_MSI:
3307
return ath10k_pci_request_irq_msi(ar);
3308
default:
3309
return -EINVAL;
3310
}
3311
}
3312
3313
static void ath10k_pci_free_irq(struct ath10k *ar)
3314
{
3315
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3316
3317
free_irq(ar_pci->pdev->irq, ar);
3318
}
3319
3320
void ath10k_pci_init_napi(struct ath10k *ar)
3321
{
3322
netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
3323
}
3324
3325
static int ath10k_pci_init_irq(struct ath10k *ar)
3326
{
3327
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3328
int ret;
3329
3330
ath10k_pci_init_napi(ar);
3331
3332
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3333
ath10k_info(ar, "limiting irq mode to: %d\n",
3334
ath10k_pci_irq_mode);
3335
3336
/* Try MSI */
3337
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) {
3338
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3339
ret = pci_enable_msi(ar_pci->pdev);
3340
if (ret == 0)
3341
return 0;
3342
3343
/* MHI failed, try legacy irq next */
3344
}
3345
3346
/* Try legacy irq
3347
*
3348
* A potential race occurs here: The CORE_BASE write
3349
* depends on target correctly decoding AXI address but
3350
* host won't know when target writes BAR to CORE_CTRL.
3351
* This write might get lost if target has NOT written BAR.
3352
* For now, fix the race by repeating the write in below
3353
* synchronization checking.
3354
*/
3355
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
3356
3357
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3358
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3359
3360
return 0;
3361
}
3362
3363
static void ath10k_pci_deinit_irq_intx(struct ath10k *ar)
3364
{
3365
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3366
0);
3367
}
3368
3369
static int ath10k_pci_deinit_irq(struct ath10k *ar)
3370
{
3371
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3372
3373
switch (ar_pci->oper_irq_mode) {
3374
case ATH10K_PCI_IRQ_INTX:
3375
ath10k_pci_deinit_irq_intx(ar);
3376
break;
3377
default:
3378
pci_disable_msi(ar_pci->pdev);
3379
break;
3380
}
3381
3382
return 0;
3383
}
3384
3385
int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3386
{
3387
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3388
unsigned long timeout;
3389
u32 val;
3390
3391
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
3392
3393
timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3394
3395
do {
3396
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3397
3398
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3399
val);
3400
3401
/* target should never return this */
3402
if (val == 0xffffffff)
3403
continue;
3404
3405
/* the device has crashed so don't bother trying anymore */
3406
if (val & FW_IND_EVENT_PENDING)
3407
break;
3408
3409
if (val & FW_IND_INITIALIZED)
3410
break;
3411
3412
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX)
3413
/* Fix potential race by repeating CORE_BASE writes */
3414
ath10k_pci_enable_intx_irq(ar);
3415
3416
mdelay(10);
3417
} while (time_before(jiffies, timeout));
3418
3419
ath10k_pci_disable_and_clear_intx_irq(ar);
3420
ath10k_pci_irq_msi_fw_mask(ar);
3421
3422
if (val == 0xffffffff) {
3423
ath10k_err(ar, "failed to read device register, device is gone\n");
3424
return -EIO;
3425
}
3426
3427
if (val & FW_IND_EVENT_PENDING) {
3428
ath10k_warn(ar, "device has crashed during init\n");
3429
return -ECOMM;
3430
}
3431
3432
if (!(val & FW_IND_INITIALIZED)) {
3433
ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3434
val);
3435
return -ETIMEDOUT;
3436
}
3437
3438
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3439
return 0;
3440
}
3441
3442
static int ath10k_pci_cold_reset(struct ath10k *ar)
3443
{
3444
u32 val;
3445
3446
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3447
3448
spin_lock_bh(&ar->data_lock);
3449
3450
ar->stats.fw_cold_reset_counter++;
3451
3452
spin_unlock_bh(&ar->data_lock);
3453
3454
/* Put Target, including PCIe, into RESET. */
3455
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3456
val |= 1;
3457
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3458
3459
/* After writing into SOC_GLOBAL_RESET to put device into
3460
* reset and pulling out of reset pcie may not be stable
3461
* for any immediate pcie register access and cause bus error,
3462
* add delay before any pcie access request to fix this issue.
3463
*/
3464
msleep(20);
3465
3466
/* Pull Target, including PCIe, out of RESET. */
3467
val &= ~1;
3468
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3469
3470
msleep(20);
3471
3472
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3473
3474
return 0;
3475
}
3476
3477
static int ath10k_pci_claim(struct ath10k *ar)
3478
{
3479
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3480
struct pci_dev *pdev = ar_pci->pdev;
3481
int ret;
3482
3483
pci_set_drvdata(pdev, ar);
3484
3485
ret = pci_enable_device(pdev);
3486
if (ret) {
3487
ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3488
return ret;
3489
}
3490
3491
ret = pci_request_region(pdev, BAR_NUM, "ath");
3492
if (ret) {
3493
ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3494
ret);
3495
goto err_device;
3496
}
3497
3498
/* Target expects 32 bit DMA. Enforce it. */
3499
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3500
if (ret) {
3501
ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3502
goto err_region;
3503
}
3504
3505
pci_set_master(pdev);
3506
3507
#if defined(__FreeBSD__)
3508
linuxkpi_pcim_want_to_use_bus_functions(pdev);
3509
#endif
3510
3511
/* Arrange for access to Target SoC registers. */
3512
ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3513
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3514
if (!ar_pci->mem) {
3515
ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3516
ret = -EIO;
3517
goto err_region;
3518
}
3519
3520
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
3521
return 0;
3522
3523
err_region:
3524
pci_release_region(pdev, BAR_NUM);
3525
3526
err_device:
3527
pci_disable_device(pdev);
3528
3529
return ret;
3530
}
3531
3532
static void ath10k_pci_release(struct ath10k *ar)
3533
{
3534
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3535
struct pci_dev *pdev = ar_pci->pdev;
3536
3537
pci_iounmap(pdev, ar_pci->mem);
3538
pci_release_region(pdev, BAR_NUM);
3539
pci_disable_device(pdev);
3540
}
3541
3542
static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3543
{
3544
const struct ath10k_pci_supp_chip *supp_chip;
3545
int i;
3546
u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3547
3548
for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3549
supp_chip = &ath10k_pci_supp_chips[i];
3550
3551
if (supp_chip->dev_id == dev_id &&
3552
supp_chip->rev_id == rev_id)
3553
return true;
3554
}
3555
3556
return false;
3557
}
3558
3559
int ath10k_pci_setup_resource(struct ath10k *ar)
3560
{
3561
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3562
struct ath10k_ce *ce = ath10k_ce_priv(ar);
3563
int ret;
3564
3565
spin_lock_init(&ce->ce_lock);
3566
spin_lock_init(&ar_pci->ps_lock);
3567
mutex_init(&ar_pci->ce_diag_mutex);
3568
3569
INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3570
3571
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3572
3573
ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
3574
sizeof(pci_host_ce_config_wlan),
3575
GFP_KERNEL);
3576
if (!ar_pci->attr)
3577
return -ENOMEM;
3578
3579
ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
3580
sizeof(pci_target_ce_config_wlan),
3581
GFP_KERNEL);
3582
if (!ar_pci->pipe_config) {
3583
ret = -ENOMEM;
3584
goto err_free_attr;
3585
}
3586
3587
ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
3588
sizeof(pci_target_service_to_ce_map_wlan),
3589
GFP_KERNEL);
3590
if (!ar_pci->serv_to_pipe) {
3591
ret = -ENOMEM;
3592
goto err_free_pipe_config;
3593
}
3594
3595
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3596
ath10k_pci_override_ce_config(ar);
3597
3598
ret = ath10k_pci_alloc_pipes(ar);
3599
if (ret) {
3600
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3601
ret);
3602
goto err_free_serv_to_pipe;
3603
}
3604
3605
return 0;
3606
3607
err_free_serv_to_pipe:
3608
kfree(ar_pci->serv_to_pipe);
3609
err_free_pipe_config:
3610
kfree(ar_pci->pipe_config);
3611
err_free_attr:
3612
kfree(ar_pci->attr);
3613
return ret;
3614
}
3615
3616
void ath10k_pci_release_resource(struct ath10k *ar)
3617
{
3618
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3619
3620
ath10k_pci_rx_retry_sync(ar);
3621
netif_napi_del(&ar->napi);
3622
ath10k_pci_ce_deinit(ar);
3623
ath10k_pci_free_pipes(ar);
3624
kfree(ar_pci->attr);
3625
kfree(ar_pci->pipe_config);
3626
kfree(ar_pci->serv_to_pipe);
3627
}
3628
3629
static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3630
.read32 = ath10k_bus_pci_read32,
3631
.write32 = ath10k_bus_pci_write32,
3632
.get_num_banks = ath10k_pci_get_num_banks,
3633
};
3634
3635
static int ath10k_pci_probe(struct pci_dev *pdev,
3636
const struct pci_device_id *pci_dev)
3637
{
3638
int ret = 0;
3639
struct ath10k *ar;
3640
struct ath10k_pci *ar_pci;
3641
enum ath10k_hw_rev hw_rev;
3642
struct ath10k_bus_params bus_params = {};
3643
bool pci_ps, is_qca988x = false;
3644
int (*pci_soft_reset)(struct ath10k *ar);
3645
int (*pci_hard_reset)(struct ath10k *ar);
3646
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3647
3648
switch (pci_dev->device) {
3649
case QCA988X_2_0_DEVICE_ID_UBNT:
3650
case QCA988X_2_0_DEVICE_ID:
3651
hw_rev = ATH10K_HW_QCA988X;
3652
pci_ps = false;
3653
is_qca988x = true;
3654
pci_soft_reset = ath10k_pci_warm_reset;
3655
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3656
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3657
break;
3658
case QCA9887_1_0_DEVICE_ID:
3659
hw_rev = ATH10K_HW_QCA9887;
3660
pci_ps = false;
3661
pci_soft_reset = ath10k_pci_warm_reset;
3662
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3663
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3664
break;
3665
case QCA6164_2_1_DEVICE_ID:
3666
case QCA6174_2_1_DEVICE_ID:
3667
hw_rev = ATH10K_HW_QCA6174;
3668
pci_ps = true;
3669
pci_soft_reset = ath10k_pci_warm_reset;
3670
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3671
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3672
break;
3673
case QCA99X0_2_0_DEVICE_ID:
3674
hw_rev = ATH10K_HW_QCA99X0;
3675
pci_ps = false;
3676
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3677
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3678
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3679
break;
3680
case QCA9984_1_0_DEVICE_ID:
3681
hw_rev = ATH10K_HW_QCA9984;
3682
pci_ps = false;
3683
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3684
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3685
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3686
break;
3687
case QCA9888_2_0_DEVICE_ID:
3688
hw_rev = ATH10K_HW_QCA9888;
3689
pci_ps = false;
3690
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3691
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3692
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3693
break;
3694
case QCA9377_1_0_DEVICE_ID:
3695
hw_rev = ATH10K_HW_QCA9377;
3696
pci_ps = true;
3697
pci_soft_reset = ath10k_pci_warm_reset;
3698
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3699
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3700
break;
3701
default:
3702
WARN_ON(1);
3703
return -EOPNOTSUPP;
3704
}
3705
3706
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3707
hw_rev, &ath10k_pci_hif_ops);
3708
if (!ar) {
3709
dev_err(&pdev->dev, "failed to allocate core\n");
3710
return -ENOMEM;
3711
}
3712
3713
ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3714
pdev->vendor, pdev->device,
3715
pdev->subsystem_vendor, pdev->subsystem_device);
3716
3717
ar_pci = ath10k_pci_priv(ar);
3718
ar_pci->pdev = pdev;
3719
ar_pci->dev = &pdev->dev;
3720
ar_pci->ar = ar;
3721
ar->dev_id = pci_dev->device;
3722
ar_pci->pci_ps = pci_ps;
3723
ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3724
ar_pci->pci_soft_reset = pci_soft_reset;
3725
ar_pci->pci_hard_reset = pci_hard_reset;
3726
ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3727
ar->ce_priv = &ar_pci->ce;
3728
3729
ar->id.vendor = pdev->vendor;
3730
ar->id.device = pdev->device;
3731
ar->id.subsystem_vendor = pdev->subsystem_vendor;
3732
ar->id.subsystem_device = pdev->subsystem_device;
3733
3734
timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3735
3736
ret = ath10k_pci_setup_resource(ar);
3737
if (ret) {
3738
ath10k_err(ar, "failed to setup resource: %d\n", ret);
3739
goto err_core_destroy;
3740
}
3741
3742
ret = ath10k_pci_claim(ar);
3743
if (ret) {
3744
ath10k_err(ar, "failed to claim device: %d\n", ret);
3745
goto err_free_pipes;
3746
}
3747
3748
ret = ath10k_pci_force_wake(ar);
3749
if (ret) {
3750
ath10k_warn(ar, "failed to wake up device : %d\n", ret);
3751
goto err_sleep;
3752
}
3753
3754
ath10k_pci_ce_deinit(ar);
3755
ath10k_pci_irq_disable(ar);
3756
3757
ret = ath10k_pci_init_irq(ar);
3758
if (ret) {
3759
ath10k_err(ar, "failed to init irqs: %d\n", ret);
3760
goto err_sleep;
3761
}
3762
3763
ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3764
ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3765
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3766
3767
ret = ath10k_pci_request_irq(ar);
3768
if (ret) {
3769
ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3770
goto err_deinit_irq;
3771
}
3772
3773
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3774
bus_params.link_can_suspend = true;
3775
/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
3776
* fall off the bus during chip_reset. These chips have the same pci
3777
* device id as the QCA9880 BR4A or 2R4E. So that's why the check.
3778
*/
3779
if (is_qca988x) {
3780
bus_params.chip_id =
3781
ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3782
if (bus_params.chip_id != 0xffffffff) {
3783
if (!ath10k_pci_chip_is_supported(pdev->device,
3784
bus_params.chip_id)) {
3785
ret = -ENODEV;
3786
goto err_unsupported;
3787
}
3788
}
3789
}
3790
3791
ret = ath10k_pci_chip_reset(ar);
3792
if (ret) {
3793
ath10k_err(ar, "failed to reset chip: %d\n", ret);
3794
goto err_free_irq;
3795
}
3796
3797
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3798
if (bus_params.chip_id == 0xffffffff) {
3799
ret = -ENODEV;
3800
goto err_unsupported;
3801
}
3802
3803
if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
3804
ret = -ENODEV;
3805
goto err_unsupported;
3806
}
3807
3808
ret = ath10k_core_register(ar, &bus_params);
3809
if (ret) {
3810
ath10k_err(ar, "failed to register driver core: %d\n", ret);
3811
goto err_free_irq;
3812
}
3813
3814
return 0;
3815
3816
err_unsupported:
3817
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3818
pdev->device, bus_params.chip_id);
3819
3820
err_free_irq:
3821
ath10k_pci_free_irq(ar);
3822
3823
err_deinit_irq:
3824
ath10k_pci_release_resource(ar);
3825
3826
err_sleep:
3827
ath10k_pci_sleep_sync(ar);
3828
ath10k_pci_release(ar);
3829
3830
err_free_pipes:
3831
ath10k_pci_free_pipes(ar);
3832
3833
err_core_destroy:
3834
ath10k_core_destroy(ar);
3835
3836
return ret;
3837
}
3838
3839
static void ath10k_pci_remove(struct pci_dev *pdev)
3840
{
3841
struct ath10k *ar = pci_get_drvdata(pdev);
3842
3843
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3844
3845
if (!ar)
3846
return;
3847
3848
ath10k_core_unregister(ar);
3849
ath10k_pci_free_irq(ar);
3850
ath10k_pci_deinit_irq(ar);
3851
ath10k_pci_release_resource(ar);
3852
ath10k_pci_sleep_sync(ar);
3853
ath10k_pci_release(ar);
3854
ath10k_core_destroy(ar);
3855
}
3856
3857
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3858
3859
#ifdef CONFIG_PM
3860
static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3861
{
3862
struct ath10k *ar = dev_get_drvdata(dev);
3863
int ret;
3864
3865
ret = ath10k_pci_suspend(ar);
3866
if (ret)
3867
ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3868
3869
return ret;
3870
}
3871
3872
static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3873
{
3874
struct ath10k *ar = dev_get_drvdata(dev);
3875
int ret;
3876
3877
ret = ath10k_pci_resume(ar);
3878
if (ret)
3879
ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3880
3881
return ret;
3882
}
3883
3884
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3885
ath10k_pci_pm_suspend,
3886
ath10k_pci_pm_resume);
3887
#endif
3888
3889
static struct pci_driver ath10k_pci_driver = {
3890
.name = "ath10k_pci",
3891
.id_table = ath10k_pci_id_table,
3892
.probe = ath10k_pci_probe,
3893
.remove = ath10k_pci_remove,
3894
#ifdef CONFIG_PM
3895
.driver.pm = &ath10k_pci_pm_ops,
3896
#endif
3897
#if defined(__FreeBSD__)
3898
.bsddriver.name = KBUILD_MODNAME,
3899
/* Allow a possible native driver to attach. */
3900
.bsd_probe_return = (BUS_PROBE_DEFAULT - 1),
3901
#endif
3902
};
3903
3904
static int __init ath10k_pci_init(void)
3905
{
3906
int ret1, ret2;
3907
3908
ret1 = pci_register_driver(&ath10k_pci_driver);
3909
if (ret1)
3910
printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3911
ret1);
3912
3913
ret2 = ath10k_ahb_init();
3914
if (ret2)
3915
printk(KERN_ERR "ahb init failed: %d\n", ret2);
3916
3917
if (ret1 && ret2)
3918
return ret1;
3919
3920
/* registered to at least one bus */
3921
return 0;
3922
}
3923
module_init(ath10k_pci_init);
3924
3925
static void __exit ath10k_pci_exit(void)
3926
{
3927
pci_unregister_driver(&ath10k_pci_driver);
3928
ath10k_ahb_exit();
3929
}
3930
3931
module_exit(ath10k_pci_exit);
3932
3933
MODULE_AUTHOR("Qualcomm Atheros");
3934
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices");
3935
MODULE_LICENSE("Dual BSD/GPL");
3936
3937
/* QCA988x 2.0 firmware files */
3938
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3939
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3940
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3941
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3942
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
3943
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3944
3945
/* QCA9887 1.0 firmware files */
3946
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3947
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
3948
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3949
3950
/* QCA6174 2.1 firmware files */
3951
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3952
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3953
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
3954
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3955
3956
/* QCA6174 3.1 firmware files */
3957
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3958
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3959
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3960
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
3961
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3962
3963
/* QCA9377 1.0 firmware files */
3964
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3965
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3966
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
3967
3968