GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath10k/pci.c
1
// SPDX-License-Identifier: ISC
2
/*
3
* Copyright (c) 2005-2011 Atheros Communications Inc.
4
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5
*/
6
7
#if defined(__FreeBSD__)
8
#define LINUXKPI_PARAM_PREFIX ath10k_pci_
9
#endif
10
11
#include <linux/pci.h>
12
#include <linux/module.h>
13
#include <linux/interrupt.h>
14
#include <linux/spinlock.h>
15
#include <linux/bitops.h>
16
#if defined(__FreeBSD__)
17
#include <linux/delay.h>
18
#include <sys/rman.h>
19
#endif
20
21
#include "core.h"
22
#include "debug.h"
23
#include "coredump.h"
24
25
#include "targaddrs.h"
26
#include "bmi.h"
27
28
#include "hif.h"
29
#include "htc.h"
30
31
#include "ce.h"
32
#include "pci.h"
33
34
enum ath10k_pci_reset_mode {
35
ATH10K_PCI_RESET_AUTO = 0,
36
ATH10K_PCI_RESET_WARM_ONLY = 1,
37
};
38
39
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
40
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
41
42
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
43
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
44
45
module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
46
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
47
48
/* how long to wait for the target to initialise, in ms */
49
#define ATH10K_PCI_TARGET_WAIT 3000
50
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
51
52
/* Maximum number of bytes that can be handled atomically by
53
* diag read and write.
54
*/
55
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
56
57
#define QCA99X0_PCIE_BAR0_START_REG 0x81030
58
#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
59
#define QCA99X0_CPU_MEM_DATA_REG 0x4d010
60
61
static const struct pci_device_id ath10k_pci_id_table[] = {
62
/* PCI-E QCA988X V2 (Ubiquiti branded) */
63
{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
64
65
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
66
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
67
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
68
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
69
{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
70
{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
71
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
72
{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
73
{0}
74
};
75
76
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
77
/* QCA988X pre 2.0 chips are not supported because they need some nasty
78
* hacks. ath10k doesn't have them and these devices crash horribly
79
* because of that.
80
*/
81
{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
82
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
83
84
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
85
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
86
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
87
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
88
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
89
90
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
91
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
92
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
93
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
94
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
95
96
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
97
98
{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
99
100
{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
101
102
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
103
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
104
105
{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
106
};
107
108
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
109
static int ath10k_pci_cold_reset(struct ath10k *ar);
110
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
111
static int ath10k_pci_init_irq(struct ath10k *ar);
112
static int ath10k_pci_deinit_irq(struct ath10k *ar);
113
static int ath10k_pci_request_irq(struct ath10k *ar);
114
static void ath10k_pci_free_irq(struct ath10k *ar);
115
static int ath10k_pci_bmi_wait(struct ath10k *ar,
116
struct ath10k_ce_pipe *tx_pipe,
117
struct ath10k_ce_pipe *rx_pipe,
118
struct bmi_xfer *xfer);
119
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
120
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
121
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
122
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
123
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
124
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
125
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
126
127
static const struct ce_attr pci_host_ce_config_wlan[] = {
128
/* CE0: host->target HTC control and raw streams */
129
{
130
.flags = CE_ATTR_FLAGS,
131
.src_nentries = 16,
132
.src_sz_max = 256,
133
.dest_nentries = 0,
134
.send_cb = ath10k_pci_htc_tx_cb,
135
},
136
137
/* CE1: target->host HTT + HTC control */
138
{
139
.flags = CE_ATTR_FLAGS,
140
.src_nentries = 0,
141
.src_sz_max = 2048,
142
.dest_nentries = 512,
143
.recv_cb = ath10k_pci_htt_htc_rx_cb,
144
},
145
146
/* CE2: target->host WMI */
147
{
148
.flags = CE_ATTR_FLAGS,
149
.src_nentries = 0,
150
.src_sz_max = 2048,
151
.dest_nentries = 128,
152
.recv_cb = ath10k_pci_htc_rx_cb,
153
},
154
155
/* CE3: host->target WMI */
156
{
157
.flags = CE_ATTR_FLAGS,
158
.src_nentries = 32,
159
.src_sz_max = 2048,
160
.dest_nentries = 0,
161
.send_cb = ath10k_pci_htc_tx_cb,
162
},
163
164
/* CE4: host->target HTT */
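/* Send completions on this pipe are not interrupt driven (CE_ATTR_DIS_INTR);
* they are reaped by polling ath10k_ce_per_engine_service() from the HTT RX
* callbacks further below.
*/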
165
{
166
.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
167
.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
168
.src_sz_max = 256,
169
.dest_nentries = 0,
170
.send_cb = ath10k_pci_htt_tx_cb,
171
},
172
173
/* CE5: target->host HTT (HIF->HTT) */
174
{
175
.flags = CE_ATTR_FLAGS,
176
.src_nentries = 0,
177
.src_sz_max = 512,
178
.dest_nentries = 512,
179
.recv_cb = ath10k_pci_htt_rx_cb,
180
},
181
182
/* CE6: target autonomous hif_memcpy */
183
{
184
.flags = CE_ATTR_FLAGS,
185
.src_nentries = 0,
186
.src_sz_max = 0,
187
.dest_nentries = 0,
188
},
189
190
/* CE7: ce_diag, the Diagnostic Window */
191
{
192
.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
193
.src_nentries = 2,
194
.src_sz_max = DIAG_TRANSFER_LIMIT,
195
.dest_nentries = 2,
196
},
197
198
/* CE8: target->host pktlog */
199
{
200
.flags = CE_ATTR_FLAGS,
201
.src_nentries = 0,
202
.src_sz_max = 2048,
203
.dest_nentries = 128,
204
.recv_cb = ath10k_pci_pktlog_rx_cb,
205
},
206
207
/* CE9 target autonomous qcache memcpy */
208
{
209
.flags = CE_ATTR_FLAGS,
210
.src_nentries = 0,
211
.src_sz_max = 0,
212
.dest_nentries = 0,
213
},
214
215
/* CE10: target autonomous hif memcpy */
216
{
217
.flags = CE_ATTR_FLAGS,
218
.src_nentries = 0,
219
.src_sz_max = 0,
220
.dest_nentries = 0,
221
},
222
223
/* CE11: target autonomous hif memcpy */
224
{
225
.flags = CE_ATTR_FLAGS,
226
.src_nentries = 0,
227
.src_sz_max = 0,
228
.dest_nentries = 0,
229
},
230
};
231
232
/* Target firmware's Copy Engine configuration. */
233
static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
234
/* CE0: host->target HTC control and raw streams */
235
{
236
.pipenum = __cpu_to_le32(0),
237
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
238
.nentries = __cpu_to_le32(32),
239
.nbytes_max = __cpu_to_le32(256),
240
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
241
.reserved = __cpu_to_le32(0),
242
},
243
244
/* CE1: target->host HTT + HTC control */
245
{
246
.pipenum = __cpu_to_le32(1),
247
.pipedir = __cpu_to_le32(PIPEDIR_IN),
248
.nentries = __cpu_to_le32(32),
249
.nbytes_max = __cpu_to_le32(2048),
250
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
251
.reserved = __cpu_to_le32(0),
252
},
253
254
/* CE2: target->host WMI */
255
{
256
.pipenum = __cpu_to_le32(2),
257
.pipedir = __cpu_to_le32(PIPEDIR_IN),
258
.nentries = __cpu_to_le32(64),
259
.nbytes_max = __cpu_to_le32(2048),
260
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
261
.reserved = __cpu_to_le32(0),
262
},
263
264
/* CE3: host->target WMI */
265
{
266
.pipenum = __cpu_to_le32(3),
267
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
268
.nentries = __cpu_to_le32(32),
269
.nbytes_max = __cpu_to_le32(2048),
270
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
271
.reserved = __cpu_to_le32(0),
272
},
273
274
/* CE4: host->target HTT */
275
{
276
.pipenum = __cpu_to_le32(4),
277
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
278
.nentries = __cpu_to_le32(256),
279
.nbytes_max = __cpu_to_le32(256),
280
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
281
.reserved = __cpu_to_le32(0),
282
},
283
284
/* NB: 50% of src nentries, since tx has 2 frags */
285
286
/* CE5: target->host HTT (HIF->HTT) */
287
{
288
.pipenum = __cpu_to_le32(5),
289
.pipedir = __cpu_to_le32(PIPEDIR_IN),
290
.nentries = __cpu_to_le32(32),
291
.nbytes_max = __cpu_to_le32(512),
292
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
293
.reserved = __cpu_to_le32(0),
294
},
295
296
/* CE6: Reserved for target autonomous hif_memcpy */
297
{
298
.pipenum = __cpu_to_le32(6),
299
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
300
.nentries = __cpu_to_le32(32),
301
.nbytes_max = __cpu_to_le32(4096),
302
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
303
.reserved = __cpu_to_le32(0),
304
},
305
306
/* CE7 used only by Host */
307
{
308
.pipenum = __cpu_to_le32(7),
309
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
310
.nentries = __cpu_to_le32(0),
311
.nbytes_max = __cpu_to_le32(0),
312
.flags = __cpu_to_le32(0),
313
.reserved = __cpu_to_le32(0),
314
},
315
316
/* CE8 target->host pktlog */
317
{
318
.pipenum = __cpu_to_le32(8),
319
.pipedir = __cpu_to_le32(PIPEDIR_IN),
320
.nentries = __cpu_to_le32(64),
321
.nbytes_max = __cpu_to_le32(2048),
322
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
323
.reserved = __cpu_to_le32(0),
324
},
325
326
/* CE9 target autonomous qcache memcpy */
327
{
328
.pipenum = __cpu_to_le32(9),
329
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
330
.nentries = __cpu_to_le32(32),
331
.nbytes_max = __cpu_to_le32(2048),
332
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
333
.reserved = __cpu_to_le32(0),
334
},
335
336
/* It is not necessary to send target wlan configuration for CE10 & CE11
337
* as these CEs are not actively used in target.
338
*/
339
};
340
341
/*
342
* Map from service/endpoint to Copy Engine.
343
* This table is derived from the CE_PCI TABLE, above.
344
* It is passed to the Target at startup for use by firmware.
345
*/
346
static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
347
{
348
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
349
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
350
__cpu_to_le32(3),
351
},
352
{
353
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
354
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
355
__cpu_to_le32(2),
356
},
357
{
358
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
359
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
360
__cpu_to_le32(3),
361
},
362
{
363
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
364
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
365
__cpu_to_le32(2),
366
},
367
{
368
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
369
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
370
__cpu_to_le32(3),
371
},
372
{
373
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
374
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
375
__cpu_to_le32(2),
376
},
377
{
378
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
379
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
380
__cpu_to_le32(3),
381
},
382
{
383
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
384
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
385
__cpu_to_le32(2),
386
},
387
{
388
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
389
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
390
__cpu_to_le32(3),
391
},
392
{
393
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
394
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
395
__cpu_to_le32(2),
396
},
397
{
398
__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
399
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
400
__cpu_to_le32(0),
401
},
402
{
403
__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
404
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
405
__cpu_to_le32(1),
406
},
407
{ /* not used */
408
__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
409
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
410
__cpu_to_le32(0),
411
},
412
{ /* not used */
413
__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
414
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
415
__cpu_to_le32(1),
416
},
417
{
418
__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
419
__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
420
__cpu_to_le32(4),
421
},
422
{
423
__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
424
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
425
__cpu_to_le32(5),
426
},
427
428
/* (Additions here) */
429
430
{ /* must be last */
431
__cpu_to_le32(0),
432
__cpu_to_le32(0),
433
__cpu_to_le32(0),
434
},
435
};
436
437
static bool ath10k_pci_is_awake(struct ath10k *ar)
438
{
439
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
440
#if defined(__linux__)
441
u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
442
RTC_STATE_ADDRESS);
443
#elif defined(__FreeBSD__)
444
u32 val = bus_read_4((struct resource *)ar_pci->mem, PCIE_LOCAL_BASE_ADDRESS +
445
RTC_STATE_ADDRESS);
446
#endif
447
448
return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
449
}
450
451
static void __ath10k_pci_wake(struct ath10k *ar)
452
{
453
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
454
455
lockdep_assert_held(&ar_pci->ps_lock);
456
457
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
458
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
459
460
#if defined(__linux__)
461
iowrite32(PCIE_SOC_WAKE_V_MASK,
462
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
463
PCIE_SOC_WAKE_ADDRESS);
464
#elif defined(__FreeBSD__)
465
bus_write_4((struct resource *)ar_pci->mem,
466
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
467
PCIE_SOC_WAKE_V_MASK);
468
#endif
469
}
470
471
static void __ath10k_pci_sleep(struct ath10k *ar)
472
{
473
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
474
475
lockdep_assert_held(&ar_pci->ps_lock);
476
477
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
478
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
479
480
#if defined(__linux__)
481
iowrite32(PCIE_SOC_WAKE_RESET,
482
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
483
PCIE_SOC_WAKE_ADDRESS);
484
#elif defined(__FreeBSD__)
485
bus_write_4((struct resource *)ar_pci->mem,
486
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
487
PCIE_SOC_WAKE_RESET);
488
#endif
489
ar_pci->ps_awake = false;
490
}
491
492
static int ath10k_pci_wake_wait(struct ath10k *ar)
493
{
494
int tot_delay = 0;
495
int curr_delay = 5;
496
497
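/* Poll the device, backing the delay off from 5 us up to 50 us per
* iteration, until it reports awake or PCIE_WAKE_TIMEOUT is exhausted.
*/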
while (tot_delay < PCIE_WAKE_TIMEOUT) {
498
if (ath10k_pci_is_awake(ar)) {
499
if (tot_delay > PCIE_WAKE_LATE_US)
500
ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
501
tot_delay / 1000);
502
return 0;
503
}
504
505
udelay(curr_delay);
506
tot_delay += curr_delay;
507
508
if (curr_delay < 50)
509
curr_delay += 5;
510
}
511
512
return -ETIMEDOUT;
513
}
514
515
static int ath10k_pci_force_wake(struct ath10k *ar)
516
{
517
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
518
unsigned long flags;
519
int ret = 0;
520
521
if (ar_pci->pci_ps)
522
return ret;
523
524
spin_lock_irqsave(&ar_pci->ps_lock, flags);
525
526
if (!ar_pci->ps_awake) {
527
#if defined(__linux__)
528
iowrite32(PCIE_SOC_WAKE_V_MASK,
529
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
530
PCIE_SOC_WAKE_ADDRESS);
531
#elif defined(__FreeBSD__)
532
bus_write_4((struct resource *)ar_pci->mem,
533
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
534
PCIE_SOC_WAKE_V_MASK);
535
#endif
536
537
ret = ath10k_pci_wake_wait(ar);
538
if (ret == 0)
539
ar_pci->ps_awake = true;
540
}
541
542
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
543
544
return ret;
545
}
546
547
static void ath10k_pci_force_sleep(struct ath10k *ar)
548
{
549
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
550
unsigned long flags;
551
552
spin_lock_irqsave(&ar_pci->ps_lock, flags);
553
554
#if defined(__linux__)
555
iowrite32(PCIE_SOC_WAKE_RESET,
556
ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
557
PCIE_SOC_WAKE_ADDRESS);
558
#elif defined(__FreeBSD__)
559
bus_write_4((struct resource *)ar_pci->mem,
560
PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
561
PCIE_SOC_WAKE_RESET);
562
#endif
563
ar_pci->ps_awake = false;
564
565
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
566
}
567
568
static int ath10k_pci_wake(struct ath10k *ar)
569
{
570
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
571
unsigned long flags;
572
int ret = 0;
573
574
if (ar_pci->pci_ps == 0)
575
return ret;
576
577
spin_lock_irqsave(&ar_pci->ps_lock, flags);
578
579
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
580
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
581
582
/* This function can be called very frequently. To avoid excessive
583
* CPU stalls for MMIO reads use a cache var to hold the device state.
584
*/
585
if (!ar_pci->ps_awake) {
586
__ath10k_pci_wake(ar);
587
588
ret = ath10k_pci_wake_wait(ar);
589
if (ret == 0)
590
ar_pci->ps_awake = true;
591
}
592
593
if (ret == 0) {
594
ar_pci->ps_wake_refcount++;
595
WARN_ON(ar_pci->ps_wake_refcount == 0);
596
}
597
598
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
599
600
return ret;
601
}
602
603
static void ath10k_pci_sleep(struct ath10k *ar)
604
{
605
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
606
unsigned long flags;
607
608
if (ar_pci->pci_ps == 0)
609
return;
610
611
spin_lock_irqsave(&ar_pci->ps_lock, flags);
612
613
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
614
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
615
616
if (WARN_ON(ar_pci->ps_wake_refcount == 0))
617
goto skip;
618
619
ar_pci->ps_wake_refcount--;
620
621
mod_timer(&ar_pci->ps_timer, jiffies +
622
msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
623
624
skip:
625
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
626
}
627
628
static void ath10k_pci_ps_timer(struct timer_list *t)
629
{
630
struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
631
struct ath10k *ar = ar_pci->ar;
632
unsigned long flags;
633
634
spin_lock_irqsave(&ar_pci->ps_lock, flags);
635
636
ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
637
ar_pci->ps_wake_refcount, ar_pci->ps_awake);
638
639
if (ar_pci->ps_wake_refcount > 0)
640
goto skip;
641
642
__ath10k_pci_sleep(ar);
643
644
skip:
645
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
646
}
647
648
static void ath10k_pci_sleep_sync(struct ath10k *ar)
649
{
650
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
651
unsigned long flags;
652
653
if (ar_pci->pci_ps == 0) {
654
ath10k_pci_force_sleep(ar);
655
return;
656
}
657
658
del_timer_sync(&ar_pci->ps_timer);
659
660
spin_lock_irqsave(&ar_pci->ps_lock, flags);
661
WARN_ON(ar_pci->ps_wake_refcount > 0);
662
__ath10k_pci_sleep(ar);
663
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
664
}
665
666
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
667
{
668
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
669
int ret;
670
671
if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
672
ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
673
offset, offset + sizeof(value), ar_pci->mem_len);
674
return;
675
}
676
677
ret = ath10k_pci_wake(ar);
678
if (ret) {
679
ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
680
value, offset, ret);
681
return;
682
}
683
684
#if defined(__linux__)
685
iowrite32(value, ar_pci->mem + offset);
686
#elif defined(__FreeBSD__)
687
bus_write_4((struct resource *)ar_pci->mem, offset, value);
688
#endif
689
ath10k_pci_sleep(ar);
690
}
691
692
static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
693
{
694
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
695
u32 val;
696
int ret;
697
698
if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
699
ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
700
offset, offset + sizeof(val), ar_pci->mem_len);
701
return 0;
702
}
703
704
ret = ath10k_pci_wake(ar);
705
if (ret) {
706
ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
707
offset, ret);
708
return 0xffffffff;
709
}
710
711
#if defined(__linux__)
712
val = ioread32(ar_pci->mem + offset);
713
#elif defined(__FreeBSD__)
714
val = bus_read_4((struct resource *)ar_pci->mem, offset);
715
#endif
716
ath10k_pci_sleep(ar);
717
718
return val;
719
}
720
721
inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
722
{
723
struct ath10k_ce *ce = ath10k_ce_priv(ar);
724
725
ce->bus_ops->write32(ar, offset, value);
726
}
727
728
inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
729
{
730
struct ath10k_ce *ce = ath10k_ce_priv(ar);
731
732
return ce->bus_ops->read32(ar, offset);
733
}
734
735
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
736
{
737
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
738
}
739
740
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
741
{
742
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
743
}
744
745
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
746
{
747
return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
748
}
749
750
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
751
{
752
ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
753
}
754
755
bool ath10k_pci_irq_pending(struct ath10k *ar)
756
{
757
u32 cause;
758
759
/* Check if the shared legacy irq is for us */
760
cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
761
PCIE_INTR_CAUSE_ADDRESS);
762
if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
763
return true;
764
765
return false;
766
}
767
768
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
769
{
770
/* IMPORTANT: INTR_CLR register has to be set after
771
* INTR_ENABLE is set to 0, otherwise interrupt can not be
772
* really cleared.
773
*/
774
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
775
0);
776
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
777
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
778
779
/* IMPORTANT: this extra read transaction is required to
780
* flush the posted write buffer.
781
*/
782
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
783
PCIE_INTR_ENABLE_ADDRESS);
784
}
785
786
void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
787
{
788
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
789
PCIE_INTR_ENABLE_ADDRESS,
790
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
791
792
/* IMPORTANT: this extra read transaction is required to
793
* flush the posted write buffer.
794
*/
795
(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
796
PCIE_INTR_ENABLE_ADDRESS);
797
}
798
799
static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
800
{
801
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
802
803
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
804
return "msi";
805
806
return "legacy";
807
}
808
809
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
810
{
811
struct ath10k *ar = pipe->hif_ce_state;
812
struct ath10k_ce *ce = ath10k_ce_priv(ar);
813
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
814
struct sk_buff *skb;
815
dma_addr_t paddr;
816
int ret;
817
818
skb = dev_alloc_skb(pipe->buf_sz);
819
if (!skb)
820
return -ENOMEM;
821
822
WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
823
824
paddr = dma_map_single(ar->dev, skb->data,
825
skb->len + skb_tailroom(skb),
826
DMA_FROM_DEVICE);
827
if (unlikely(dma_mapping_error(ar->dev, paddr))) {
828
ath10k_warn(ar, "failed to dma map pci rx buf\n");
829
dev_kfree_skb_any(skb);
830
return -EIO;
831
}
832
833
ATH10K_SKB_RXCB(skb)->paddr = paddr;
834
835
spin_lock_bh(&ce->ce_lock);
836
ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
837
spin_unlock_bh(&ce->ce_lock);
838
if (ret) {
839
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
840
DMA_FROM_DEVICE);
841
dev_kfree_skb_any(skb);
842
return ret;
843
}
844
845
return 0;
846
}
847
848
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
849
{
850
struct ath10k *ar = pipe->hif_ce_state;
851
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
852
struct ath10k_ce *ce = ath10k_ce_priv(ar);
853
struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
854
int ret, num;
855
856
if (pipe->buf_sz == 0)
857
return;
858
859
if (!ce_pipe->dest_ring)
860
return;
861
862
spin_lock_bh(&ce->ce_lock);
863
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
864
spin_unlock_bh(&ce->ce_lock);
865
866
while (num >= 0) {
867
ret = __ath10k_pci_rx_post_buf(pipe);
868
if (ret) {
869
if (ret == -ENOSPC)
870
break;
871
ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
872
mod_timer(&ar_pci->rx_post_retry, jiffies +
873
ATH10K_PCI_RX_POST_RETRY_MS);
874
break;
875
}
876
num--;
877
}
878
}
879
880
void ath10k_pci_rx_post(struct ath10k *ar)
881
{
882
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
883
int i;
884
885
for (i = 0; i < CE_COUNT; i++)
886
ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
887
}
888
889
void ath10k_pci_rx_replenish_retry(struct timer_list *t)
890
{
891
struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
892
struct ath10k *ar = ar_pci->ar;
893
894
ath10k_pci_rx_post(ar);
895
}
896
897
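/* Convert a target CPU address to CE address space on QCA988X: keep the
* low 20 address bits, set bit 20, and take the upper window bits from the
* CORE_CTRL register.
*/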
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
898
{
899
u32 val = 0, region = addr & 0xfffff;
900
901
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
902
& 0x7ff) << 21;
903
val |= 0x100000 | region;
904
return val;
905
}
906
907
* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
908
* Supports access to target space below 1M for qca6174 and qca9377.
909
* If target space is below 1M, the bit[20] of converted CE addr is 0.
910
* Otherwise bit[20] of converted CE addr is 1.
911
*/
912
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
913
{
914
u32 val = 0, region = addr & 0xfffff;
915
916
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
917
& 0x7ff) << 21;
918
val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
919
return val;
920
}
921
922
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
923
{
924
u32 val = 0, region = addr & 0xfffff;
925
926
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
927
val |= 0x100000 | region;
928
return val;
929
}
930
931
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
932
{
933
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
934
935
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
936
return -ENOTSUPP;
937
938
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
939
}
940
941
/*
942
* Diagnostic read/write access is provided for startup/config/debug usage.
943
* Caller must guarantee proper alignment, when applicable, and single user
944
* at any moment.
945
*/
946
#if defined(__linux__)
947
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
948
#elif defined(__FreeBSD__)
949
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, u8 *data,
950
#endif
951
int nbytes)
952
{
953
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
954
int ret = 0;
955
u32 *buf;
956
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
957
struct ath10k_ce_pipe *ce_diag;
958
/* Host buffer address in CE space */
959
u32 ce_data;
960
dma_addr_t ce_data_base = 0;
961
void *data_buf;
962
int i;
963
964
mutex_lock(&ar_pci->ce_diag_mutex);
965
ce_diag = ar_pci->ce_diag;
966
967
/*
968
* Allocate a temporary bounce buffer to hold caller's data
969
* to be DMA'ed from Target. This guarantees
970
* 1) 4-byte alignment
971
* 2) Buffer in DMA-able space
972
*/
973
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
974
975
data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
976
GFP_ATOMIC);
977
if (!data_buf) {
978
ret = -ENOMEM;
979
goto done;
980
}
981
982
/* The address supplied by the caller is in the
983
* Target CPU virtual address space.
984
*
985
* In order to use this address with the diagnostic CE,
986
* convert it from Target CPU virtual address space
987
* to CE address space
988
*/
989
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
990
991
remaining_bytes = nbytes;
992
ce_data = ce_data_base;
993
while (remaining_bytes) {
994
nbytes = min_t(unsigned int, remaining_bytes,
995
DIAG_TRANSFER_LIMIT);
996
997
ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
998
if (ret != 0)
999
goto done;
1000
1001
/* Request CE to send from Target(!) address to Host buffer */
1002
ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
1003
if (ret)
1004
goto done;
1005
1006
i = 0;
1007
while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1008
udelay(DIAG_ACCESS_CE_WAIT_US);
1009
i += DIAG_ACCESS_CE_WAIT_US;
1010
1011
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1012
ret = -EBUSY;
1013
goto done;
1014
}
1015
}
1016
1017
i = 0;
1018
while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1019
&completed_nbytes) != 0) {
1020
udelay(DIAG_ACCESS_CE_WAIT_US);
1021
i += DIAG_ACCESS_CE_WAIT_US;
1022
1023
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1024
ret = -EBUSY;
1025
goto done;
1026
}
1027
}
1028
1029
if (nbytes != completed_nbytes) {
1030
ret = -EIO;
1031
goto done;
1032
}
1033
1034
if (*buf != ce_data) {
1035
ret = -EIO;
1036
goto done;
1037
}
1038
1039
remaining_bytes -= nbytes;
1040
memcpy(data, data_buf, nbytes);
1041
1042
address += nbytes;
1043
data += nbytes;
1044
}
1045
1046
done:
1047
1048
if (data_buf)
1049
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1050
ce_data_base);
1051
1052
mutex_unlock(&ar_pci->ce_diag_mutex);
1053
1054
return ret;
1055
}
1056
1057
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1058
{
1059
__le32 val = 0;
1060
int ret;
1061
1062
#if defined(__linux__)
1063
ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1064
#elif defined(__FreeBSD__)
1065
ret = ath10k_pci_diag_read_mem(ar, address, (u8 *)&val, sizeof(val));
1066
#endif
1067
*value = __le32_to_cpu(val);
1068
1069
return ret;
1070
}
1071
1072
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1073
u32 src, u32 len)
1074
{
1075
u32 host_addr, addr;
1076
int ret;
1077
1078
host_addr = host_interest_item_address(src);
1079
1080
ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1081
if (ret != 0) {
1082
ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1083
src, ret);
1084
return ret;
1085
}
1086
1087
ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1088
if (ret != 0) {
1089
ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1090
addr, len, ret);
1091
return ret;
1092
}
1093
1094
return 0;
1095
}
1096
1097
#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
1098
__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1099
1100
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1101
#if defined(__linux__)
1102
const void *data, int nbytes)
1103
#elif defined(__FreeBSD__)
1104
const void *_d, int nbytes)
1105
#endif
1106
{
1107
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1108
int ret = 0;
1109
u32 *buf;
1110
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
1111
struct ath10k_ce_pipe *ce_diag;
1112
void *data_buf;
1113
dma_addr_t ce_data_base = 0;
1114
int i;
1115
#if defined(__FreeBSD__)
1116
const u8 *data = _d;
1117
#endif
1118
1119
mutex_lock(&ar_pci->ce_diag_mutex);
1120
ce_diag = ar_pci->ce_diag;
1121
1122
/*
1123
* Allocate a temporary bounce buffer to hold caller's data
1124
* to be DMA'ed to Target. This guarantees
1125
* 1) 4-byte alignment
1126
* 2) Buffer in DMA-able space
1127
*/
1128
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
1129
1130
data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
1131
GFP_ATOMIC);
1132
if (!data_buf) {
1133
ret = -ENOMEM;
1134
goto done;
1135
}
1136
1137
/*
1138
* The address supplied by the caller is in the
1139
* Target CPU virtual address space.
1140
*
1141
* In order to use this address with the diagnostic CE,
1142
* convert it from
1143
* Target CPU virtual address space
1144
* to
1145
* CE address space
1146
*/
1147
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1148
1149
remaining_bytes = nbytes;
1150
while (remaining_bytes) {
1151
/* FIXME: check cast */
1152
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1153
1154
/* Copy caller's data to allocated DMA buf */
1155
memcpy(data_buf, data, nbytes);
1156
1157
/* Set up to receive directly into Target(!) address */
1158
ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
1159
if (ret != 0)
1160
goto done;
1161
1162
/*
1163
* Request CE to send caller-supplied data that
1164
* was copied to bounce buffer to Target(!) address.
1165
*/
1166
ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
1167
if (ret != 0)
1168
goto done;
1169
1170
i = 0;
1171
while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1172
udelay(DIAG_ACCESS_CE_WAIT_US);
1173
i += DIAG_ACCESS_CE_WAIT_US;
1174
1175
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1176
ret = -EBUSY;
1177
goto done;
1178
}
1179
}
1180
1181
i = 0;
1182
while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1183
&completed_nbytes) != 0) {
1184
udelay(DIAG_ACCESS_CE_WAIT_US);
1185
i += DIAG_ACCESS_CE_WAIT_US;
1186
1187
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1188
ret = -EBUSY;
1189
goto done;
1190
}
1191
}
1192
1193
if (nbytes != completed_nbytes) {
1194
ret = -EIO;
1195
goto done;
1196
}
1197
1198
if (*buf != address) {
1199
ret = -EIO;
1200
goto done;
1201
}
1202
1203
remaining_bytes -= nbytes;
1204
address += nbytes;
1205
data += nbytes;
1206
}
1207
1208
done:
1209
if (data_buf) {
1210
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1211
ce_data_base);
1212
}
1213
1214
if (ret != 0)
1215
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1216
address, ret);
1217
1218
mutex_unlock(&ar_pci->ce_diag_mutex);
1219
1220
return ret;
1221
}
1222
1223
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1224
{
1225
__le32 val = __cpu_to_le32(value);
1226
1227
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1228
}
1229
1230
/* Called by lower (CE) layer when a send to Target completes. */
1231
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1232
{
1233
struct ath10k *ar = ce_state->ar;
1234
struct sk_buff_head list;
1235
struct sk_buff *skb;
1236
1237
__skb_queue_head_init(&list);
1238
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1239
/* no need to call tx completion for NULL pointers */
1240
if (skb == NULL)
1241
continue;
1242
1243
__skb_queue_tail(&list, skb);
1244
}
1245
1246
while ((skb = __skb_dequeue(&list)))
1247
ath10k_htc_tx_completion_handler(ar, skb);
1248
}
1249
1250
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1251
void (*callback)(struct ath10k *ar,
1252
struct sk_buff *skb))
1253
{
1254
struct ath10k *ar = ce_state->ar;
1255
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1256
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1257
struct sk_buff *skb;
1258
struct sk_buff_head list;
1259
void *transfer_context;
1260
unsigned int nbytes, max_nbytes;
1261
1262
__skb_queue_head_init(&list);
1263
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1264
&nbytes) == 0) {
1265
skb = transfer_context;
1266
max_nbytes = skb->len + skb_tailroom(skb);
1267
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1268
max_nbytes, DMA_FROM_DEVICE);
1269
1270
if (unlikely(max_nbytes < nbytes)) {
1271
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1272
nbytes, max_nbytes);
1273
dev_kfree_skb_any(skb);
1274
continue;
1275
}
1276
1277
skb_put(skb, nbytes);
1278
__skb_queue_tail(&list, skb);
1279
}
1280
1281
while ((skb = __skb_dequeue(&list))) {
1282
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1283
ce_state->id, skb->len);
1284
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1285
skb->data, skb->len);
1286
1287
callback(ar, skb);
1288
}
1289
1290
ath10k_pci_rx_post_pipe(pipe_info);
1291
}
1292
1293
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1294
void (*callback)(struct ath10k *ar,
1295
struct sk_buff *skb))
1296
{
1297
struct ath10k *ar = ce_state->ar;
1298
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1299
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1300
struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1301
struct sk_buff *skb;
1302
struct sk_buff_head list;
1303
void *transfer_context;
1304
unsigned int nbytes, max_nbytes, nentries;
1305
int orig_len;
1306
1307
/* No need to acquire ce_lock for CE5, since this is the only place CE5
1308
* is processed other than init and deinit. Before releasing CE5
1309
* buffers, interrupts are disabled. Thus CE5 access is serialized.
1310
*/
1311
__skb_queue_head_init(&list);
1312
while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1313
&nbytes) == 0) {
1314
skb = transfer_context;
1315
max_nbytes = skb->len + skb_tailroom(skb);
1316
1317
if (unlikely(max_nbytes < nbytes)) {
1318
ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1319
nbytes, max_nbytes);
1320
continue;
1321
}
1322
1323
dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1324
max_nbytes, DMA_FROM_DEVICE);
1325
skb_put(skb, nbytes);
1326
__skb_queue_tail(&list, skb);
1327
}
1328
1329
nentries = skb_queue_len(&list);
1330
while ((skb = __skb_dequeue(&list))) {
1331
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1332
ce_state->id, skb->len);
1333
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1334
skb->data, skb->len);
1335
1336
orig_len = skb->len;
1337
callback(ar, skb);
1338
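/* Reset the skb back to an empty state so the same buffer can be handed
* back to the device via dma_sync_single_for_device() below.
*/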
skb_push(skb, orig_len - skb->len);
1339
skb_reset_tail_pointer(skb);
1340
skb_trim(skb, 0);
1341
1342
/* let device gain the buffer again */
1343
dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1344
skb->len + skb_tailroom(skb),
1345
DMA_FROM_DEVICE);
1346
}
1347
ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1348
}
1349
1350
/* Called by lower (CE) layer when data is received from the Target. */
1351
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1352
{
1353
ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1354
}
1355
1356
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1357
{
1358
/* CE4 polling needs to be done whenever CE pipe which transports
1359
* HTT Rx (target->host) is processed.
1360
*/
1361
ath10k_ce_per_engine_service(ce_state->ar, 4);
1362
1363
ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1364
}
1365
1366
/* Called by lower (CE) layer when data is received from the Target.
1367
* Only 10.4 firmware uses separate CE to transfer pktlog data.
1368
*/
1369
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1370
{
1371
ath10k_pci_process_rx_cb(ce_state,
1372
ath10k_htt_rx_pktlog_completion_handler);
1373
}
1374
1375
/* Called by lower (CE) layer when a send to HTT Target completes. */
1376
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1377
{
1378
struct ath10k *ar = ce_state->ar;
1379
struct sk_buff *skb;
1380
1381
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1382
/* no need to call tx completion for NULL pointers */
1383
if (!skb)
1384
continue;
1385
1386
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1387
skb->len, DMA_TO_DEVICE);
1388
ath10k_htt_hif_tx_complete(ar, skb);
1389
}
1390
}
1391
1392
static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1393
{
1394
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1395
ath10k_htt_t2h_msg_handler(ar, skb);
1396
}
1397
1398
/* Called by lower (CE) layer when HTT data is received from the Target. */
1399
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1400
{
1401
/* CE4 polling needs to be done whenever CE pipe which transports
1402
* HTT Rx (target->host) is processed.
1403
*/
1404
ath10k_ce_per_engine_service(ce_state->ar, 4);
1405
1406
ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1407
}
1408
1409
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1410
struct ath10k_hif_sg_item *items, int n_items)
1411
{
1412
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1413
struct ath10k_ce *ce = ath10k_ce_priv(ar);
1414
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1415
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1416
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1417
unsigned int nentries_mask;
1418
unsigned int sw_index;
1419
unsigned int write_index;
1420
int err, i = 0;
1421
1422
spin_lock_bh(&ce->ce_lock);
1423
1424
nentries_mask = src_ring->nentries_mask;
1425
sw_index = src_ring->sw_index;
1426
write_index = src_ring->write_index;
1427
1428
if (unlikely(CE_RING_DELTA(nentries_mask,
1429
write_index, sw_index - 1) < n_items)) {
1430
err = -ENOBUFS;
1431
goto err;
1432
}
1433
1434
for (i = 0; i < n_items - 1; i++) {
1435
ath10k_dbg(ar, ATH10K_DBG_PCI,
1436
"pci tx item %d paddr %pad len %d n_items %d\n",
1437
i, &items[i].paddr, items[i].len, n_items);
1438
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1439
items[i].vaddr, items[i].len);
1440
1441
err = ath10k_ce_send_nolock(ce_pipe,
1442
items[i].transfer_context,
1443
items[i].paddr,
1444
items[i].len,
1445
items[i].transfer_id,
1446
CE_SEND_FLAG_GATHER);
1447
if (err)
1448
goto err;
1449
}
1450
1451
/* `i` is equal to `n_items - 1` after the for() loop above */
1452
1453
ath10k_dbg(ar, ATH10K_DBG_PCI,
1454
#if defined(__linux__)
1455
"pci tx item %d paddr %pad len %d n_items %d\n",
1456
i, &items[i].paddr, items[i].len, n_items);
1457
#elif defined(__FreeBSD__)
1458
"pci tx item %d paddr %pad len %d n_items %d pipe_id %u\n",
1459
i, &items[i].paddr, items[i].len, n_items, pipe_id);
1460
/*
1461
* XXX-BZ specific debug; the DELAY makes things work for one chipset.
1462
* There's likely a race somewhere (here or LinuxKPI).
1463
*/
1464
if (n_items == 1 && items[i].len == 140) {
1465
ath10k_dbg_dump(ar, ATH10K_DBG_PCI, NULL, "pci tx data: ",
1466
items[i].vaddr, items[i].len);
1467
dump_stack();
1468
DELAY(500);
1469
}
1470
#endif
1471
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1472
items[i].vaddr, items[i].len);
1473
1474
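/* Send the final item without CE_SEND_FLAG_GATHER to mark the end of the
* scatter-gather sequence.
*/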
err = ath10k_ce_send_nolock(ce_pipe,
1475
items[i].transfer_context,
1476
items[i].paddr,
1477
items[i].len,
1478
items[i].transfer_id,
1479
0);
1480
if (err)
1481
goto err;
1482
1483
spin_unlock_bh(&ce->ce_lock);
1484
return 0;
1485
1486
err:
1487
for (; i > 0; i--)
1488
__ath10k_ce_send_revert(ce_pipe);
1489
1490
spin_unlock_bh(&ce->ce_lock);
1491
return err;
1492
}
1493
1494
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1495
size_t buf_len)
1496
{
1497
return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1498
}
1499
1500
u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1501
{
1502
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1503
1504
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1505
1506
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1507
}
1508
1509
static void ath10k_pci_dump_registers(struct ath10k *ar,
1510
struct ath10k_fw_crash_data *crash_data)
1511
{
1512
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1513
int i, ret;
1514
1515
lockdep_assert_held(&ar->dump_mutex);
1516
1517
ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1518
hi_failure_state,
1519
REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1520
if (ret) {
1521
ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1522
return;
1523
}
1524
1525
BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1526
1527
ath10k_err(ar, "firmware register dump:\n");
1528
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1529
ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1530
i,
1531
__le32_to_cpu(reg_dump_values[i]),
1532
__le32_to_cpu(reg_dump_values[i + 1]),
1533
__le32_to_cpu(reg_dump_values[i + 2]),
1534
__le32_to_cpu(reg_dump_values[i + 3]));
1535
1536
if (!crash_data)
1537
return;
1538
1539
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1540
crash_data->registers[i] = reg_dump_values[i];
1541
}
1542
1543
static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1544
const struct ath10k_mem_region *mem_region,
1545
u8 *buf, size_t buf_len)
1546
{
1547
const struct ath10k_mem_section *cur_section, *next_section;
1548
unsigned int count, section_size, skip_size;
1549
int ret, i, j;
1550
1551
if (!mem_region || !buf)
1552
return 0;
1553
1554
cur_section = &mem_region->section_table.sections[0];
1555
1556
if (mem_region->start > cur_section->start) {
1557
ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1558
mem_region->start, cur_section->start);
1559
return 0;
1560
}
1561
1562
skip_size = cur_section->start - mem_region->start;
1563
1564
/* fill the gap between the memory region start and the first
1565
* section start address
1566
*/
1567
for (i = 0; i < skip_size; i++) {
1568
*buf = ATH10K_MAGIC_NOT_COPIED;
1569
buf++;
1570
}
1571
1572
count = 0;
1573
1574
for (i = 0; cur_section != NULL; i++) {
1575
section_size = cur_section->end - cur_section->start;
1576
1577
if (section_size <= 0) {
1578
ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1579
cur_section->start,
1580
cur_section->end);
1581
break;
1582
}
1583
1584
if ((i + 1) == mem_region->section_table.size) {
1585
/* last section */
1586
next_section = NULL;
1587
skip_size = 0;
1588
} else {
1589
next_section = cur_section + 1;
1590
1591
if (cur_section->end > next_section->start) {
1592
ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1593
next_section->start,
1594
cur_section->end);
1595
break;
1596
}
1597
1598
skip_size = next_section->start - cur_section->end;
1599
}
1600
1601
if (buf_len < (skip_size + section_size)) {
1602
ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1603
break;
1604
}
1605
1606
buf_len -= skip_size + section_size;
1607
1608
/* read section to dest memory */
1609
ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1610
buf, section_size);
1611
if (ret) {
1612
ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1613
cur_section->start, ret);
1614
break;
1615
}
1616
1617
buf += section_size;
1618
count += section_size;
1619
1620
/* fill in the gap between this section and the next */
1621
for (j = 0; j < skip_size; j++) {
1622
*buf = ATH10K_MAGIC_NOT_COPIED;
1623
buf++;
1624
}
1625
1626
count += skip_size;
1627
1628
if (!next_section)
1629
/* this was the last section */
1630
break;
1631
1632
cur_section = next_section;
1633
}
1634
1635
return count;
1636
}
1637
1638
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1639
{
1640
u32 val;
1641
1642
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1643
FW_RAM_CONFIG_ADDRESS, config);
1644
1645
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1646
FW_RAM_CONFIG_ADDRESS);
1647
if (val != config) {
1648
ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1649
val, config);
1650
return -EIO;
1651
}
1652
1653
return 0;
1654
}
1655
1656
/* Always returns the length */
1657
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1658
const struct ath10k_mem_region *region,
1659
u8 *buf)
1660
{
1661
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1662
u32 base_addr, i;
1663
1664
#if defined(__linux__)
1665
base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1666
#elif defined(__FreeBSD__)
1667
base_addr = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_PCIE_BAR0_START_REG);
1668
#endif
1669
base_addr += region->start;
1670
1671
for (i = 0; i < region->len; i += 4) {
1672
#if defined(__linux__)
1673
iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1674
*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1675
#elif defined(__FreeBSD__)
1676
bus_write_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_ADDR_REG, base_addr + i);
1677
*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_DATA_REG);
1678
#endif
1679
}
1680
1681
return region->len;
1682
}
1683
1684
/* if an error happened returns < 0, otherwise the length */
1685
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1686
const struct ath10k_mem_region *region,
1687
u8 *buf)
1688
{
1689
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1690
u32 i;
1691
int ret;
1692
1693
mutex_lock(&ar->conf_mutex);
1694
if (ar->state != ATH10K_STATE_ON) {
1695
ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
1696
ret = -EIO;
1697
goto done;
1698
}
1699
1700
for (i = 0; i < region->len; i += 4)
1701
#if defined(__linux__)
1702
*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1703
#elif defined(__FreeBSD__)
1704
*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, region->start + i);
1705
#endif
1706
1707
ret = region->len;
1708
done:
1709
mutex_unlock(&ar->conf_mutex);
1710
return ret;
1711
}
1712
1713
/* if an error happened returns < 0, otherwise the length */
1714
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1715
const struct ath10k_mem_region *current_region,
1716
u8 *buf)
1717
{
1718
int ret;
1719
1720
if (current_region->section_table.size > 0)
1721
/* Copy each section individually. */
1722
return ath10k_pci_dump_memory_section(ar,
1723
current_region,
1724
buf,
1725
current_region->len);
1726
1727
/* No individual memory sections defined, so we can
1728
* copy the entire memory region.
1729
*/
1730
ret = ath10k_pci_diag_read_mem(ar,
1731
current_region->start,
1732
buf,
1733
current_region->len);
1734
if (ret) {
1735
ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1736
current_region->name, ret);
1737
return ret;
1738
}
1739
1740
return current_region->len;
1741
}
1742
1743
static void ath10k_pci_dump_memory(struct ath10k *ar,
1744
struct ath10k_fw_crash_data *crash_data)
1745
{
1746
const struct ath10k_hw_mem_layout *mem_layout;
1747
const struct ath10k_mem_region *current_region;
1748
struct ath10k_dump_ram_data_hdr *hdr;
1749
u32 count, shift;
1750
size_t buf_len;
1751
int ret, i;
1752
u8 *buf;
1753
1754
lockdep_assert_held(&ar->dump_mutex);
1755
1756
if (!crash_data)
1757
return;
1758
1759
mem_layout = ath10k_coredump_get_mem_layout(ar);
1760
if (!mem_layout)
1761
return;
1762
1763
current_region = &mem_layout->region_table.regions[0];
1764
1765
buf = crash_data->ramdump_buf;
1766
buf_len = crash_data->ramdump_buf_len;
1767
1768
memset(buf, 0, buf_len);
1769
1770
for (i = 0; i < mem_layout->region_table.size; i++) {
1771
count = 0;
1772
1773
if (current_region->len > buf_len) {
1774
ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
1775
current_region->name,
1776
current_region->len,
1777
buf_len);
1778
break;
1779
}
1780
1781
/* To get IRAM dump, the host driver needs to switch target
1782
* ram config from DRAM to IRAM.
1783
*/
1784
if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1785
current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1786
shift = current_region->start >> 20;
1787
1788
ret = ath10k_pci_set_ram_config(ar, shift);
1789
if (ret) {
1790
ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1791
current_region->name, ret);
1792
break;
1793
}
1794
}
1795
1796
/* Reserve space for the header. */
1797
hdr = (void *)buf;
1798
buf += sizeof(*hdr);
1799
buf_len -= sizeof(*hdr);
1800
1801
switch (current_region->type) {
1802
case ATH10K_MEM_REGION_TYPE_IOSRAM:
1803
count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1804
break;
1805
case ATH10K_MEM_REGION_TYPE_IOREG:
1806
ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1807
if (ret < 0)
1808
break;
1809
1810
count = ret;
1811
break;
1812
default:
1813
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1814
if (ret < 0)
1815
break;
1816
1817
count = ret;
1818
break;
1819
}
1820
1821
hdr->region_type = cpu_to_le32(current_region->type);
1822
hdr->start = cpu_to_le32(current_region->start);
1823
hdr->length = cpu_to_le32(count);
1824
1825
if (count == 0)
1826
/* Note: the header remains, just with zero length. */
1827
break;
1828
1829
buf += count;
1830
buf_len -= count;
1831
1832
current_region++;
1833
}
1834
}
1835
1836
static void ath10k_pci_fw_dump_work(struct work_struct *work)
1837
{
1838
struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1839
dump_work);
1840
struct ath10k_fw_crash_data *crash_data;
1841
struct ath10k *ar = ar_pci->ar;
1842
char guid[UUID_STRING_LEN + 1];
1843
1844
mutex_lock(&ar->dump_mutex);
1845
1846
spin_lock_bh(&ar->data_lock);
1847
ar->stats.fw_crash_counter++;
1848
spin_unlock_bh(&ar->data_lock);
1849
1850
crash_data = ath10k_coredump_new(ar);
1851
1852
if (crash_data)
1853
scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1854
else
1855
scnprintf(guid, sizeof(guid), "n/a");
1856
1857
ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1858
ath10k_print_driver_info(ar);
1859
ath10k_pci_dump_registers(ar, crash_data);
1860
ath10k_ce_dump_registers(ar, crash_data);
1861
ath10k_pci_dump_memory(ar, crash_data);
1862
1863
mutex_unlock(&ar->dump_mutex);
1864
1865
ath10k_core_start_recovery(ar);
1866
}
1867
1868
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1869
{
1870
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1871
1872
queue_work(ar->workqueue, &ar_pci->dump_work);
1873
}
1874
1875
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1876
int force)
1877
{
1878
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1879
1880
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1881
1882
if (!force) {
1883
int resources;
1884
/*
1885
* Decide whether to actually poll for completions, or just
1886
* wait for a later chance.
1887
* If there seem to be plenty of resources left, then just wait
1888
* since checking involves reading a CE register, which is a
1889
* relatively expensive operation.
1890
*/
1891
resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1892
1893
/*
1894
* If at least 50% of the total resources are still available,
1895
* don't bother checking again yet.
1896
*/
1897
if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
1898
return;
1899
}
1900
ath10k_ce_per_engine_service(ar, pipe);
1901
}
1902
1903
static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1904
{
1905
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1906
1907
del_timer_sync(&ar_pci->rx_post_retry);
1908
}
1909
1910
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1911
u8 *ul_pipe, u8 *dl_pipe)
1912
{
1913
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1914
const struct ce_service_to_pipe *entry;
1915
bool ul_set = false, dl_set = false;
1916
int i;
1917
1918
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1919
1920
for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
1921
entry = &ar_pci->serv_to_pipe[i];
1922
1923
if (__le32_to_cpu(entry->service_id) != service_id)
1924
continue;
1925
1926
switch (__le32_to_cpu(entry->pipedir)) {
1927
case PIPEDIR_NONE:
1928
break;
1929
case PIPEDIR_IN:
1930
WARN_ON(dl_set);
1931
*dl_pipe = __le32_to_cpu(entry->pipenum);
1932
dl_set = true;
1933
break;
1934
case PIPEDIR_OUT:
1935
WARN_ON(ul_set);
1936
*ul_pipe = __le32_to_cpu(entry->pipenum);
1937
ul_set = true;
1938
break;
1939
case PIPEDIR_INOUT:
1940
WARN_ON(dl_set);
1941
WARN_ON(ul_set);
1942
*dl_pipe = __le32_to_cpu(entry->pipenum);
1943
*ul_pipe = __le32_to_cpu(entry->pipenum);
1944
dl_set = true;
1945
ul_set = true;
1946
break;
1947
}
1948
}
1949
1950
if (!ul_set || !dl_set)
1951
return -ENOENT;
1952
1953
return 0;
1954
}
1955
1956
void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1957
u8 *ul_pipe, u8 *dl_pipe)
1958
{
1959
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1960
1961
(void)ath10k_pci_hif_map_service_to_pipe(ar,
1962
ATH10K_HTC_SVC_ID_RSVD_CTRL,
1963
ul_pipe, dl_pipe);
1964
}
1965
1966
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1967
{
1968
u32 val;
1969
1970
switch (ar->hw_rev) {
1971
case ATH10K_HW_QCA988X:
1972
case ATH10K_HW_QCA9887:
1973
case ATH10K_HW_QCA6174:
1974
case ATH10K_HW_QCA9377:
1975
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1976
CORE_CTRL_ADDRESS);
1977
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1978
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1979
CORE_CTRL_ADDRESS, val);
1980
break;
1981
case ATH10K_HW_QCA99X0:
1982
case ATH10K_HW_QCA9984:
1983
case ATH10K_HW_QCA9888:
1984
case ATH10K_HW_QCA4019:
1985
/* TODO: Find appropriate register configuration for QCA99X0
1986
* to mask irq/MSI.
1987
*/
1988
break;
1989
case ATH10K_HW_WCN3990:
1990
break;
1991
}
1992
}
1993
1994
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1995
{
1996
u32 val;
1997
1998
switch (ar->hw_rev) {
1999
case ATH10K_HW_QCA988X:
2000
case ATH10K_HW_QCA9887:
2001
case ATH10K_HW_QCA6174:
2002
case ATH10K_HW_QCA9377:
2003
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2004
CORE_CTRL_ADDRESS);
2005
val |= CORE_CTRL_PCIE_REG_31_MASK;
2006
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2007
CORE_CTRL_ADDRESS, val);
2008
break;
2009
case ATH10K_HW_QCA99X0:
2010
case ATH10K_HW_QCA9984:
2011
case ATH10K_HW_QCA9888:
2012
case ATH10K_HW_QCA4019:
2013
/* TODO: Find appropriate register configuration for QCA99X0
2014
* to unmask irq/MSI.
2015
*/
2016
break;
2017
case ATH10K_HW_WCN3990:
2018
break;
2019
}
2020
}
2021
2022
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
    ath10k_ce_disable_interrupts(ar);
    ath10k_pci_disable_and_clear_legacy_irq(ar);
    ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
    ath10k_ce_enable_interrupts(ar);
    ath10k_pci_enable_legacy_irq(ar);
    ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

    ath10k_core_napi_enable(ar);

    ath10k_pci_irq_enable(ar);
    ath10k_pci_rx_post(ar);

    pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
                               ar_pci->link_ctl);

    return 0;
}
2059
2060
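/* Release any RX buffers still posted to this pipe's destination ring:
 * unmap their DMA addresses and free the skbs.
 */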
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2061
{
2062
struct ath10k *ar;
2063
struct ath10k_ce_pipe *ce_pipe;
2064
struct ath10k_ce_ring *ce_ring;
2065
struct sk_buff *skb;
2066
int i;
2067
2068
ar = pci_pipe->hif_ce_state;
2069
ce_pipe = pci_pipe->ce_hdl;
2070
ce_ring = ce_pipe->dest_ring;
2071
2072
if (!ce_ring)
2073
return;
2074
2075
if (!pci_pipe->buf_sz)
2076
return;
2077
2078
for (i = 0; i < ce_ring->nentries; i++) {
2079
skb = ce_ring->per_transfer_context[i];
2080
if (!skb)
2081
continue;
2082
2083
ce_ring->per_transfer_context[i] = NULL;
2084
2085
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
2086
skb->len + skb_tailroom(skb),
2087
DMA_FROM_DEVICE);
2088
dev_kfree_skb_any(skb);
2089
}
2090
}
2091
2092
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2093
{
2094
struct ath10k *ar;
2095
struct ath10k_ce_pipe *ce_pipe;
2096
struct ath10k_ce_ring *ce_ring;
2097
struct sk_buff *skb;
2098
int i;
2099
2100
ar = pci_pipe->hif_ce_state;
2101
ce_pipe = pci_pipe->ce_hdl;
2102
ce_ring = ce_pipe->src_ring;
2103
2104
if (!ce_ring)
2105
return;
2106
2107
if (!pci_pipe->buf_sz)
2108
return;
2109
2110
for (i = 0; i < ce_ring->nentries; i++) {
2111
skb = ce_ring->per_transfer_context[i];
2112
if (!skb)
2113
continue;
2114
2115
ce_ring->per_transfer_context[i] = NULL;
2116
2117
ath10k_htc_tx_completion_handler(ar, skb);
2118
}
2119
}
2120
2121
/*
2122
* Cleanup residual buffers for device shutdown:
2123
* buffers that were enqueued for receive
2124
* buffers that were to be sent
2125
* Note: Buffers that had completed but which were
2126
* not yet processed are on a completion queue. They
2127
* are handled when the completion thread shuts down.
2128
*/
2129
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2130
{
2131
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2132
int pipe_num;
2133
2134
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2135
struct ath10k_pci_pipe *pipe_info;
2136
2137
pipe_info = &ar_pci->pipe_info[pipe_num];
2138
ath10k_pci_rx_pipe_cleanup(pipe_info);
2139
ath10k_pci_tx_pipe_cleanup(pipe_info);
2140
}
2141
}
2142
2143
void ath10k_pci_ce_deinit(struct ath10k *ar)
2144
{
2145
int i;
2146
2147
for (i = 0; i < CE_COUNT; i++)
2148
ath10k_ce_deinit_pipe(ar, i);
2149
}
2150
2151
void ath10k_pci_flush(struct ath10k *ar)
2152
{
2153
ath10k_pci_rx_retry_sync(ar);
2154
ath10k_pci_buffer_cleanup(ar);
2155
}
2156
2157
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    unsigned long flags;

    ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

    ath10k_pci_irq_disable(ar);
    ath10k_pci_irq_sync(ar);

    ath10k_core_napi_sync_disable(ar);

    cancel_work_sync(&ar_pci->dump_work);

    /* Most likely the device has the HTT Rx ring configured. The only way
     * to prevent the device from accessing (and possibly corrupting) host
     * memory is to reset the chip now.
     *
     * There's also no known way of masking MSI interrupts on the device.
     * For ranged MSI the CE-related interrupts can be masked. However,
     * regardless of how many MSI interrupts are assigned, the first one
     * is always used for firmware indications (crashes) and cannot be
     * masked. To prevent the device from asserting the interrupt, reset it
     * before proceeding with cleanup.
     */
    ath10k_pci_safe_chip_reset(ar);

    ath10k_pci_flush(ar);

    spin_lock_irqsave(&ar_pci->ps_lock, flags);
    WARN_ON(ar_pci->ps_wake_refcount > 0);
    spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
2190
2191
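/* Synchronous BMI exchange over the BMI copy engine pair: the request is
 * copied to a DMA-safe bounce buffer and sent on the TX CE; if a response
 * is expected, a bounce buffer is posted on the RX CE first. Completion is
 * polled for in ath10k_pci_bmi_wait() below.
 */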
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2192
void *req, u32 req_len,
2193
void *resp, u32 *resp_len)
2194
{
2195
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2196
struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2197
struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2198
struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2199
struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2200
dma_addr_t req_paddr = 0;
2201
dma_addr_t resp_paddr = 0;
2202
struct bmi_xfer xfer = {};
2203
void *treq, *tresp = NULL;
2204
int ret = 0;
2205
2206
might_sleep();
2207
2208
if (resp && !resp_len)
2209
return -EINVAL;
2210
2211
if (resp && resp_len && *resp_len == 0)
2212
return -EINVAL;
2213
2214
treq = kmemdup(req, req_len, GFP_KERNEL);
2215
if (!treq)
2216
return -ENOMEM;
2217
2218
req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2219
ret = dma_mapping_error(ar->dev, req_paddr);
2220
if (ret) {
2221
ret = -EIO;
2222
goto err_dma;
2223
}
2224
2225
if (resp && resp_len) {
2226
tresp = kzalloc(*resp_len, GFP_KERNEL);
2227
if (!tresp) {
2228
ret = -ENOMEM;
2229
goto err_req;
2230
}
2231
2232
resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2233
DMA_FROM_DEVICE);
2234
ret = dma_mapping_error(ar->dev, resp_paddr);
2235
if (ret) {
2236
ret = -EIO;
2237
goto err_req;
2238
}
2239
2240
xfer.wait_for_resp = true;
2241
xfer.resp_len = 0;
2242
2243
ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2244
}
2245
2246
ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2247
if (ret)
2248
goto err_resp;
2249
2250
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2251
if (ret) {
2252
dma_addr_t unused_buffer;
2253
unsigned int unused_nbytes;
2254
unsigned int unused_id;
2255
2256
ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2257
&unused_nbytes, &unused_id);
2258
} else {
2259
/* non-zero means we did not time out */
2260
ret = 0;
2261
}
2262
2263
err_resp:
2264
if (resp) {
2265
dma_addr_t unused_buffer;
2266
2267
ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2268
dma_unmap_single(ar->dev, resp_paddr,
2269
*resp_len, DMA_FROM_DEVICE);
2270
}
2271
err_req:
2272
dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2273
2274
if (ret == 0 && resp_len) {
2275
*resp_len = min(*resp_len, xfer.resp_len);
2276
memcpy(resp, tresp, *resp_len);
2277
}
2278
err_dma:
2279
kfree(treq);
2280
kfree(tresp);
2281
2282
return ret;
2283
}
2284
2285
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2286
{
2287
struct bmi_xfer *xfer;
2288
2289
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2290
return;
2291
2292
xfer->tx_done = true;
2293
}
2294
2295
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2296
{
2297
struct ath10k *ar = ce_state->ar;
2298
struct bmi_xfer *xfer;
2299
unsigned int nbytes;
2300
2301
if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2302
&nbytes))
2303
return;
2304
2305
if (WARN_ON_ONCE(!xfer))
2306
return;
2307
2308
if (!xfer->wait_for_resp) {
2309
ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2310
return;
2311
}
2312
2313
xfer->resp_len = nbytes;
2314
xfer->rx_done = true;
2315
}
2316
2317
static int ath10k_pci_bmi_wait(struct ath10k *ar,
2318
struct ath10k_ce_pipe *tx_pipe,
2319
struct ath10k_ce_pipe *rx_pipe,
2320
struct bmi_xfer *xfer)
2321
{
2322
unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2323
unsigned long started = jiffies;
2324
unsigned long dur;
2325
int ret;
2326
2327
while (time_before_eq(jiffies, timeout)) {
2328
ath10k_pci_bmi_send_done(tx_pipe);
2329
ath10k_pci_bmi_recv_data(rx_pipe);
2330
2331
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2332
ret = 0;
2333
goto out;
2334
}
2335
2336
#if defined(__linux__)
2337
schedule();
2338
#elif defined(__FreeBSD__)
2339
/* Using LinuxKPI we'll hang for-ever as there's no wake_up */
2340
kern_yield(PRI_USER);
2341
#endif
2342
}
2343
2344
ret = -ETIMEDOUT;
2345
2346
out:
2347
dur = jiffies - started;
2348
if (dur > HZ)
2349
ath10k_dbg(ar, ATH10K_DBG_BMI,
2350
"bmi cmd took %lu jiffies hz %d ret %d\n",
2351
dur, HZ, ret);
2352
return ret;
2353
}
2354
2355
/*
2356
* Send an interrupt to the device to wake up the Target CPU
2357
* so it has an opportunity to notice any changed state.
2358
*/
2359
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2360
{
2361
u32 addr, val;
2362
2363
addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2364
val = ath10k_pci_read32(ar, addr);
2365
val |= CORE_CTRL_CPU_INTR_MASK;
2366
ath10k_pci_write32(ar, addr, val);
2367
2368
return 0;
2369
}
2370
2371
static int ath10k_pci_get_num_banks(struct ath10k *ar)
2372
{
2373
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2374
2375
switch (ar_pci->pdev->device) {
2376
case QCA988X_2_0_DEVICE_ID_UBNT:
2377
case QCA988X_2_0_DEVICE_ID:
2378
case QCA99X0_2_0_DEVICE_ID:
2379
case QCA9888_2_0_DEVICE_ID:
2380
case QCA9984_1_0_DEVICE_ID:
2381
case QCA9887_1_0_DEVICE_ID:
2382
return 1;
2383
case QCA6164_2_1_DEVICE_ID:
2384
case QCA6174_2_1_DEVICE_ID:
2385
switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2386
case QCA6174_HW_1_0_CHIP_ID_REV:
2387
case QCA6174_HW_1_1_CHIP_ID_REV:
2388
case QCA6174_HW_2_1_CHIP_ID_REV:
2389
case QCA6174_HW_2_2_CHIP_ID_REV:
2390
return 3;
2391
case QCA6174_HW_1_3_CHIP_ID_REV:
2392
return 2;
2393
case QCA6174_HW_3_0_CHIP_ID_REV:
2394
case QCA6174_HW_3_1_CHIP_ID_REV:
2395
case QCA6174_HW_3_2_CHIP_ID_REV:
2396
return 9;
2397
}
2398
break;
2399
case QCA9377_1_0_DEVICE_ID:
2400
return 9;
2401
}
2402
2403
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2404
return 1;
2405
}
2406
2407
static int ath10k_bus_get_num_banks(struct ath10k *ar)
2408
{
2409
struct ath10k_ce *ce = ath10k_ce_priv(ar);
2410
2411
return ce->bus_ops->get_num_banks(ar);
2412
}
2413
2414
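/* Push the host's CE pipe configuration and service-to-pipe map to the
 * target via the diagnostic window, disable PCIe L1, program the early
 * allocation IRAM banks and finally set HI_OPTION_EARLY_CFG_DONE so the
 * target proceeds with initialisation.
 */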
int ath10k_pci_init_config(struct ath10k *ar)
2415
{
2416
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2417
u32 interconnect_targ_addr;
2418
u32 pcie_state_targ_addr = 0;
2419
u32 pipe_cfg_targ_addr = 0;
2420
u32 svc_to_pipe_map = 0;
2421
u32 pcie_config_flags = 0;
2422
u32 ealloc_value;
2423
u32 ealloc_targ_addr;
2424
u32 flag2_value;
2425
u32 flag2_targ_addr;
2426
int ret = 0;
2427
2428
/* Download to Target the CE Config and the service-to-CE map */
2429
interconnect_targ_addr =
2430
host_interest_item_address(HI_ITEM(hi_interconnect_state));
2431
2432
/* Supply Target-side CE configuration */
2433
ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2434
&pcie_state_targ_addr);
2435
if (ret != 0) {
2436
ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2437
return ret;
2438
}
2439
2440
if (pcie_state_targ_addr == 0) {
2441
ret = -EIO;
2442
ath10k_err(ar, "Invalid pcie state addr\n");
2443
return ret;
2444
}
2445
2446
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2447
offsetof(struct pcie_state,
2448
pipe_cfg_addr)),
2449
&pipe_cfg_targ_addr);
2450
if (ret != 0) {
2451
ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2452
return ret;
2453
}
2454
2455
if (pipe_cfg_targ_addr == 0) {
2456
ret = -EIO;
2457
ath10k_err(ar, "Invalid pipe cfg addr\n");
2458
return ret;
2459
}
2460
2461
ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2462
ar_pci->pipe_config,
2463
sizeof(struct ce_pipe_config) *
2464
NUM_TARGET_CE_CONFIG_WLAN);
2465
2466
if (ret != 0) {
2467
ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2468
return ret;
2469
}
2470
2471
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2472
offsetof(struct pcie_state,
2473
svc_to_pipe_map)),
2474
&svc_to_pipe_map);
2475
if (ret != 0) {
2476
ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2477
return ret;
2478
}
2479
2480
if (svc_to_pipe_map == 0) {
2481
ret = -EIO;
2482
ath10k_err(ar, "Invalid svc_to_pipe map\n");
2483
return ret;
2484
}
2485
2486
ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2487
ar_pci->serv_to_pipe,
2488
sizeof(pci_target_service_to_ce_map_wlan));
2489
if (ret != 0) {
2490
ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2491
return ret;
2492
}
2493
2494
ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2495
offsetof(struct pcie_state,
2496
config_flags)),
2497
&pcie_config_flags);
2498
if (ret != 0) {
2499
ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2500
return ret;
2501
}
2502
2503
pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2504
2505
ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2506
offsetof(struct pcie_state,
2507
config_flags)),
2508
pcie_config_flags);
2509
if (ret != 0) {
2510
ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2511
return ret;
2512
}
2513
2514
/* configure early allocation */
2515
ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2516
2517
ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2518
if (ret != 0) {
2519
ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2520
return ret;
2521
}
2522
2523
/* first bank is switched to IRAM */
2524
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2525
HI_EARLY_ALLOC_MAGIC_MASK);
2526
ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2527
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2528
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2529
2530
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2531
if (ret != 0) {
2532
ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2533
return ret;
2534
}
2535
2536
/* Tell Target to proceed with initialization */
2537
flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2538
2539
ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2540
if (ret != 0) {
2541
ath10k_err(ar, "Failed to get option val: %d\n", ret);
2542
return ret;
2543
}
2544
2545
flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2546
2547
ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2548
if (ret != 0) {
2549
ath10k_err(ar, "Failed to set option val: %d\n", ret);
2550
return ret;
2551
}
2552
2553
return 0;
2554
}
2555
2556
static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
    struct ce_attr *attr;
    struct ce_pipe_config *config;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    /* For QCA6174 we're overriding the Copy Engine 5 configuration,
     * since it is currently used for another feature.
     */

    /* Override Host's Copy Engine 5 configuration */
    attr = &ar_pci->attr[5];
    attr->src_sz_max = 0;
    attr->dest_nentries = 0;

    /* Override Target firmware's Copy Engine configuration */
    config = &ar_pci->pipe_config[5];
    config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
    config->nbytes_max = __cpu_to_le32(2048);

    /* Map from service/endpoint to Copy Engine */
    ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
}
2579
2580
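/* Allocate all copy engine pipes and record their CE state handles; the
 * last pipe (CE_DIAG_PIPE) is reserved for the diagnostic window and does
 * not get a buffer size.
 */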
int ath10k_pci_alloc_pipes(struct ath10k *ar)
2581
{
2582
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2583
struct ath10k_pci_pipe *pipe;
2584
struct ath10k_ce *ce = ath10k_ce_priv(ar);
2585
int i, ret;
2586
2587
for (i = 0; i < CE_COUNT; i++) {
2588
pipe = &ar_pci->pipe_info[i];
2589
pipe->ce_hdl = &ce->ce_states[i];
2590
pipe->pipe_num = i;
2591
pipe->hif_ce_state = ar;
2592
2593
ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
2594
if (ret) {
2595
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2596
i, ret);
2597
return ret;
2598
}
2599
2600
/* Last CE is Diagnostic Window */
2601
if (i == CE_DIAG_PIPE) {
2602
ar_pci->ce_diag = pipe->ce_hdl;
2603
continue;
2604
}
2605
2606
pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
2607
}
2608
2609
return 0;
2610
}
2611
2612
void ath10k_pci_free_pipes(struct ath10k *ar)
2613
{
2614
int i;
2615
2616
for (i = 0; i < CE_COUNT; i++)
2617
ath10k_ce_free_pipe(ar, i);
2618
}
2619
2620
int ath10k_pci_init_pipes(struct ath10k *ar)
2621
{
2622
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2623
int i, ret;
2624
2625
for (i = 0; i < CE_COUNT; i++) {
2626
ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
2627
if (ret) {
2628
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2629
i, ret);
2630
return ret;
2631
}
2632
}
2633
2634
return 0;
2635
}
2636
2637
static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2638
{
2639
return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2640
FW_IND_EVENT_PENDING;
2641
}
2642
2643
static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2644
{
2645
u32 val;
2646
2647
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2648
val &= ~FW_IND_EVENT_PENDING;
2649
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2650
}
2651
2652
static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2653
{
2654
u32 val;
2655
2656
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2657
return (val == 0xffffffff);
2658
}
2659
2660
/* this function effectively clears target memory controller assert line */
2661
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2662
{
2663
u32 val;
2664
2665
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2666
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2667
val | SOC_RESET_CONTROL_SI0_RST_MASK);
2668
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2669
2670
msleep(10);
2671
2672
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2673
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2674
val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2675
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2676
2677
msleep(10);
2678
}
2679
2680
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2681
{
2682
u32 val;
2683
2684
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2685
2686
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2687
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2688
val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2689
}
2690
2691
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2692
{
2693
u32 val;
2694
2695
val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2696
2697
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2698
val | SOC_RESET_CONTROL_CE_RST_MASK);
2699
msleep(10);
2700
ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2701
val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2702
}
2703
2704
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2705
{
2706
u32 val;
2707
2708
val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
2709
ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
2710
val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2711
}
2712
2713
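/* Warm reset sequence: disable interrupts, clear the SI0 assert line and
 * reset the target CPU, re-init the CE pipes and wait for the target, then
 * clear the LF timer, reset the CE block and the CPU once more, and wait
 * for the target to report it has initialised.
 */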
static int ath10k_pci_warm_reset(struct ath10k *ar)
2714
{
2715
int ret;
2716
2717
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2718
2719
spin_lock_bh(&ar->data_lock);
2720
ar->stats.fw_warm_reset_counter++;
2721
spin_unlock_bh(&ar->data_lock);
2722
2723
ath10k_pci_irq_disable(ar);
2724
2725
/* Make sure the target CPU is not doing anything dangerous, e.g. if it
2726
* were to access copy engine while host performs copy engine reset
2727
* then it is possible for the device to confuse pci-e controller to
2728
* the point of bringing host system to a complete stop (i.e. hang).
2729
*/
2730
ath10k_pci_warm_reset_si0(ar);
2731
ath10k_pci_warm_reset_cpu(ar);
2732
ath10k_pci_init_pipes(ar);
2733
ath10k_pci_wait_for_target_init(ar);
2734
2735
ath10k_pci_warm_reset_clear_lf(ar);
2736
ath10k_pci_warm_reset_ce(ar);
2737
ath10k_pci_warm_reset_cpu(ar);
2738
ath10k_pci_init_pipes(ar);
2739
2740
ret = ath10k_pci_wait_for_target_init(ar);
2741
if (ret) {
2742
ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2743
return ret;
2744
}
2745
2746
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2747
2748
return 0;
2749
}
2750
2751
static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
    ath10k_pci_irq_disable(ar);
    return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    if (!ar_pci->pci_soft_reset)
        return -ENOTSUPP;

    return ar_pci->pci_soft_reset(ar);
}
2766
2767
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2768
{
2769
int i, ret;
2770
u32 val;
2771
2772
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2773
2774
/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
* It is thus preferred to use warm reset, which is safer but may not be
* able to recover the device from all possible failure scenarios.
2777
*
2778
* Warm reset doesn't always work on first try so attempt it a few
2779
* times before giving up.
2780
*/
2781
for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2782
ret = ath10k_pci_warm_reset(ar);
2783
if (ret) {
2784
ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2785
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2786
ret);
2787
continue;
2788
}
2789
2790
/* FIXME: Sometimes copy engine doesn't recover after warm
2791
* reset. In most cases this needs cold reset. In some of these
2792
* cases the device is in such a state that a cold reset may
2793
* lock up the host.
2794
*
2795
* Reading any host interest register via copy engine is
2796
* sufficient to verify if device is capable of booting
2797
* firmware blob.
2798
*/
2799
ret = ath10k_pci_init_pipes(ar);
2800
if (ret) {
2801
ath10k_warn(ar, "failed to init copy engine: %d\n",
2802
ret);
2803
continue;
2804
}
2805
2806
ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2807
&val);
2808
if (ret) {
2809
ath10k_warn(ar, "failed to poke copy engine: %d\n",
2810
ret);
2811
continue;
2812
}
2813
2814
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2815
return 0;
2816
}
2817
2818
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2819
ath10k_warn(ar, "refusing cold reset as requested\n");
2820
return -EPERM;
2821
}
2822
2823
ret = ath10k_pci_cold_reset(ar);
2824
if (ret) {
2825
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2826
return ret;
2827
}
2828
2829
ret = ath10k_pci_wait_for_target_init(ar);
2830
if (ret) {
2831
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2832
ret);
2833
return ret;
2834
}
2835
2836
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2837
2838
return 0;
2839
}
2840
2841
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2842
{
2843
int ret;
2844
2845
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2846
2847
/* FIXME: QCA6174 requires cold + warm reset to work. */
2848
2849
ret = ath10k_pci_cold_reset(ar);
2850
if (ret) {
2851
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2852
return ret;
2853
}
2854
2855
ret = ath10k_pci_wait_for_target_init(ar);
2856
if (ret) {
2857
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2858
ret);
2859
return ret;
2860
}
2861
2862
ret = ath10k_pci_warm_reset(ar);
2863
if (ret) {
2864
ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2865
return ret;
2866
}
2867
2868
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2869
2870
return 0;
2871
}
2872
2873
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2874
{
2875
int ret;
2876
2877
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2878
2879
ret = ath10k_pci_cold_reset(ar);
2880
if (ret) {
2881
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2882
return ret;
2883
}
2884
2885
ret = ath10k_pci_wait_for_target_init(ar);
2886
if (ret) {
2887
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2888
ret);
2889
return ret;
2890
}
2891
2892
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2893
2894
return 0;
2895
}
2896
2897
static int ath10k_pci_chip_reset(struct ath10k *ar)
2898
{
2899
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2900
2901
if (WARN_ON(!ar_pci->pci_hard_reset))
2902
return -ENOTSUPP;
2903
2904
return ar_pci->pci_hard_reset(ar);
2905
}
2906
2907
static int ath10k_pci_hif_power_up(struct ath10k *ar,
2908
enum ath10k_firmware_mode fw_mode)
2909
{
2910
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2911
int ret;
2912
2913
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2914
2915
pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2916
&ar_pci->link_ctl);
2917
pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2918
ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2919
2920
/*
2921
* Bring the target up cleanly.
2922
*
2923
* The target may be in an undefined state with an AUX-powered Target
2924
* and a Host in WoW mode. If the Host crashes, loses power, or is
2925
* restarted (without unloading the driver) then the Target is left
2926
* (aux) powered and running. On a subsequent driver load, the Target
2927
* is in an unexpected state. We try to catch that here in order to
2928
* reset the Target and retry the probe.
2929
*/
2930
ret = ath10k_pci_chip_reset(ar);
2931
if (ret) {
2932
if (ath10k_pci_has_fw_crashed(ar)) {
2933
ath10k_warn(ar, "firmware crashed during chip reset\n");
2934
ath10k_pci_fw_crashed_clear(ar);
2935
ath10k_pci_fw_crashed_dump(ar);
2936
}
2937
2938
ath10k_err(ar, "failed to reset chip: %d\n", ret);
2939
goto err_sleep;
2940
}
2941
2942
ret = ath10k_pci_init_pipes(ar);
2943
if (ret) {
2944
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2945
goto err_sleep;
2946
}
2947
2948
ret = ath10k_pci_init_config(ar);
2949
if (ret) {
2950
ath10k_err(ar, "failed to setup init config: %d\n", ret);
2951
goto err_ce;
2952
}
2953
2954
ret = ath10k_pci_wake_target_cpu(ar);
2955
if (ret) {
2956
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2957
goto err_ce;
2958
}
2959
2960
return 0;
2961
2962
err_ce:
2963
ath10k_pci_ce_deinit(ar);
2964
2965
err_sleep:
2966
return ret;
2967
}
2968
2969
void ath10k_pci_hif_power_down(struct ath10k *ar)
2970
{
2971
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2972
2973
/* Currently hif_power_up performs effectively a reset and hif_stop
2974
* resets the chip as well so there's no point in resetting here.
2975
*/
2976
}
2977
2978
static int ath10k_pci_hif_suspend(struct ath10k *ar)
2979
{
2980
/* Nothing to do; the important stuff is in the driver suspend. */
2981
return 0;
2982
}
2983
2984
#ifdef CONFIG_PM
2985
static int ath10k_pci_suspend(struct ath10k *ar)
2986
{
2987
/* The grace timer can still be counting down and ar->ps_awake be true.
2988
* It is known that the device may be asleep after resuming regardless
2989
* of the SoC powersave state before suspending. Hence make sure the
2990
* device is asleep before proceeding.
2991
*/
2992
ath10k_pci_sleep_sync(ar);
2993
2994
return 0;
2995
}
2996
#endif
2997
2998
static int ath10k_pci_hif_resume(struct ath10k *ar)
2999
{
3000
/* Nothing to do; the important stuff is in the driver resume. */
3001
return 0;
3002
}
3003
3004
#ifdef CONFIG_PM
3005
static int ath10k_pci_resume(struct ath10k *ar)
3006
{
3007
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3008
struct pci_dev *pdev = ar_pci->pdev;
3009
u32 val;
3010
int ret = 0;
3011
3012
ret = ath10k_pci_force_wake(ar);
3013
if (ret) {
3014
ath10k_err(ar, "failed to wake up target: %d\n", ret);
3015
return ret;
3016
}
3017
3018
/* Suspend/Resume resets the PCI configuration space, so we have to
3019
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
3020
* from interfering with C3 CPU state. pci_restore_state won't help
3021
* here since it only restores the first 64 bytes pci config header.
3022
*/
3023
pci_read_config_dword(pdev, 0x40, &val);
3024
if ((val & 0x0000ff00) != 0)
3025
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
3026
3027
return ret;
3028
}
3029
#endif
3030
3031
static bool ath10k_pci_validate_cal(void *data, size_t size)
3032
{
3033
__le16 *cal_words = data;
3034
u16 checksum = 0;
3035
size_t i;
3036
3037
if (size % 2 != 0)
3038
return false;
3039
3040
for (i = 0; i < size / 2; i++)
3041
checksum ^= le16_to_cpu(cal_words[i]);
3042
3043
return checksum == 0xffff;
3044
}
3045
3046
static void ath10k_pci_enable_eeprom(struct ath10k *ar)
3047
{
3048
/* Enable SI clock */
3049
ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
3050
3051
/* Configure GPIOs for I2C operation */
3052
ath10k_pci_write32(ar,
3053
GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
3054
4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
3055
SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
3056
GPIO_PIN0_CONFIG) |
3057
SM(1, GPIO_PIN0_PAD_PULL));
3058
3059
ath10k_pci_write32(ar,
3060
GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
3061
4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
3062
SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
3063
SM(1, GPIO_PIN0_PAD_PULL));
3064
3065
ath10k_pci_write32(ar,
3066
GPIO_BASE_ADDRESS +
3067
QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
3068
1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
3069
3070
/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
3071
ath10k_pci_write32(ar,
3072
SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
3073
SM(1, SI_CONFIG_ERR_INT) |
3074
SM(1, SI_CONFIG_BIDIR_OD_DATA) |
3075
SM(1, SI_CONFIG_I2C) |
3076
SM(1, SI_CONFIG_POS_SAMPLE) |
3077
SM(1, SI_CONFIG_INACTIVE_DATA) |
3078
SM(1, SI_CONFIG_INACTIVE_CLK) |
3079
SM(8, SI_CONFIG_DIVIDER));
3080
}
3081
3082
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
3083
{
3084
u32 reg;
3085
int wait_limit;
3086
3087
/* set device select byte for the read operation */
3088
reg = QCA9887_EEPROM_SELECT_READ |
3089
SM(addr, QCA9887_EEPROM_ADDR_LO) |
3090
SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
3091
ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
3092
3093
/* write transmit data, transfer length, and START bit */
3094
ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
3095
SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
3096
SM(4, SI_CS_TX_CNT));
3097
3098
/* wait max 1 sec */
3099
wait_limit = 100000;
3100
3101
/* wait for SI_CS_DONE_INT */
3102
do {
3103
reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
3104
if (MS(reg, SI_CS_DONE_INT))
3105
break;
3106
3107
wait_limit--;
3108
udelay(10);
3109
} while (wait_limit > 0);
3110
3111
if (!MS(reg, SI_CS_DONE_INT)) {
3112
ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
3113
addr);
3114
return -ETIMEDOUT;
3115
}
3116
3117
/* clear SI_CS_DONE_INT */
3118
ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3119
3120
if (MS(reg, SI_CS_DONE_ERR)) {
3121
ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3122
return -EIO;
3123
}
3124
3125
/* extract receive data */
3126
reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3127
*out = reg;
3128
3129
return 0;
3130
}
3131
3132
static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3133
size_t *data_len)
3134
{
3135
u8 *caldata = NULL;
3136
size_t calsize, i;
3137
int ret;
3138
3139
if (!QCA_REV_9887(ar))
3140
return -EOPNOTSUPP;
3141
3142
calsize = ar->hw_params.cal_data_len;
3143
caldata = kmalloc(calsize, GFP_KERNEL);
3144
if (!caldata)
3145
return -ENOMEM;
3146
3147
ath10k_pci_enable_eeprom(ar);
3148
3149
for (i = 0; i < calsize; i++) {
3150
ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3151
if (ret)
3152
goto err_free;
3153
}
3154
3155
if (!ath10k_pci_validate_cal(caldata, calsize))
3156
goto err_free;
3157
3158
*data = caldata;
3159
*data_len = calsize;
3160
3161
return 0;
3162
3163
err_free:
3164
kfree(caldata);
3165
3166
return -EINVAL;
3167
}
3168
3169
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3170
.tx_sg = ath10k_pci_hif_tx_sg,
3171
.diag_read = ath10k_pci_hif_diag_read,
3172
.diag_write = ath10k_pci_diag_write_mem,
3173
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
3174
.start = ath10k_pci_hif_start,
3175
.stop = ath10k_pci_hif_stop,
3176
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
3177
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
3178
.send_complete_check = ath10k_pci_hif_send_complete_check,
3179
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
3180
.power_up = ath10k_pci_hif_power_up,
3181
.power_down = ath10k_pci_hif_power_down,
3182
.read32 = ath10k_pci_read32,
3183
.write32 = ath10k_pci_write32,
3184
.suspend = ath10k_pci_hif_suspend,
3185
.resume = ath10k_pci_hif_resume,
3186
.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
3187
};
3188
3189
/*
3190
* Top-level interrupt handler for all PCI interrupts from a Target.
3191
* When a block of MSI interrupts is allocated, this top-level handler
3192
* is not used; instead, we directly call the correct sub-handler.
3193
*/
3194
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3195
{
3196
struct ath10k *ar = arg;
3197
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3198
int ret;
3199
3200
if (ath10k_pci_has_device_gone(ar))
3201
return IRQ_NONE;
3202
3203
ret = ath10k_pci_force_wake(ar);
3204
if (ret) {
3205
ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3206
return IRQ_NONE;
3207
}
3208
3209
if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3210
!ath10k_pci_irq_pending(ar))
3211
return IRQ_NONE;
3212
3213
ath10k_pci_disable_and_clear_legacy_irq(ar);
3214
ath10k_pci_irq_msi_fw_mask(ar);
3215
napi_schedule(&ar->napi);
3216
3217
return IRQ_HANDLED;
3218
}
3219
3220
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3221
{
3222
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3223
int done = 0;
3224
3225
if (ath10k_pci_has_fw_crashed(ar)) {
3226
ath10k_pci_fw_crashed_clear(ar);
3227
ath10k_pci_fw_crashed_dump(ar);
3228
napi_complete(ctx);
3229
return done;
3230
}
3231
3232
ath10k_ce_per_engine_service_any(ar);
3233
3234
done = ath10k_htt_txrx_compl_task(ar, budget);
3235
3236
if (done < budget) {
3237
napi_complete_done(ctx, done);
3238
/* In case of MSI, it is possible that interrupts are received
* while NAPI poll is in progress. Pending interrupts that are
* received after all copy engine pipes have been processed by NAPI
* poll would otherwise not be handled again. This has been seen to
* prevent the boot sequence from completing on x86 platforms. So
* before re-enabling interrupts it is safer to check for pending
* interrupts and service them immediately.
*/
3246
if (ath10k_ce_interrupt_summary(ar)) {
3247
napi_reschedule(ctx);
3248
goto out;
3249
}
3250
ath10k_pci_enable_legacy_irq(ar);
3251
ath10k_pci_irq_msi_fw_unmask(ar);
3252
}
3253
3254
out:
3255
return done;
3256
}
3257
3258
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3259
{
3260
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3261
int ret;
3262
3263
ret = request_irq(ar_pci->pdev->irq,
3264
ath10k_pci_interrupt_handler,
3265
IRQF_SHARED, "ath10k_pci", ar);
3266
if (ret) {
3267
ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3268
ar_pci->pdev->irq, ret);
3269
return ret;
3270
}
3271
3272
return 0;
3273
}
3274
3275
static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3276
{
3277
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3278
int ret;
3279
3280
ret = request_irq(ar_pci->pdev->irq,
3281
ath10k_pci_interrupt_handler,
3282
IRQF_SHARED, "ath10k_pci", ar);
3283
if (ret) {
3284
ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3285
ar_pci->pdev->irq, ret);
3286
return ret;
3287
}
3288
3289
return 0;
3290
}
3291
3292
static int ath10k_pci_request_irq(struct ath10k *ar)
3293
{
3294
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3295
3296
switch (ar_pci->oper_irq_mode) {
3297
case ATH10K_PCI_IRQ_LEGACY:
3298
return ath10k_pci_request_irq_legacy(ar);
3299
case ATH10K_PCI_IRQ_MSI:
3300
return ath10k_pci_request_irq_msi(ar);
3301
default:
3302
return -EINVAL;
3303
}
3304
}
3305
3306
static void ath10k_pci_free_irq(struct ath10k *ar)
3307
{
3308
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3309
3310
free_irq(ar_pci->pdev->irq, ar);
3311
}
3312
3313
void ath10k_pci_init_napi(struct ath10k *ar)
3314
{
3315
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
3316
}
3317
3318
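/* Pick the operating interrupt mode: try MSI first (unless the irq_mode
 * module parameter forces legacy) and fall back to legacy/shared
 * interrupts, pre-enabling the firmware and CE interrupt mask to cover
 * the CORE_BASE write race described below.
 */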
static int ath10k_pci_init_irq(struct ath10k *ar)
3319
{
3320
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3321
int ret;
3322
3323
ath10k_pci_init_napi(ar);
3324
3325
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3326
ath10k_info(ar, "limiting irq mode to: %d\n",
3327
ath10k_pci_irq_mode);
3328
3329
/* Try MSI */
3330
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
3331
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3332
ret = pci_enable_msi(ar_pci->pdev);
3333
if (ret == 0)
3334
return 0;
3335
3336
/* MSI failed, try legacy irq next */
3337
}
3338
3339
/* Try legacy irq
3340
*
3341
* A potential race occurs here: The CORE_BASE write
3342
* depends on target correctly decoding AXI address but
3343
* host won't know when target writes BAR to CORE_CTRL.
3344
* This write might get lost if target has NOT written BAR.
3345
* For now, fix the race by repeating the write in below
3346
* synchronization checking.
3347
*/
3348
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
3349
3350
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3351
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3352
3353
return 0;
3354
}
3355
3356
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
3357
{
3358
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3359
0);
3360
}
3361
3362
static int ath10k_pci_deinit_irq(struct ath10k *ar)
3363
{
3364
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3365
3366
switch (ar_pci->oper_irq_mode) {
3367
case ATH10K_PCI_IRQ_LEGACY:
3368
ath10k_pci_deinit_irq_legacy(ar);
3369
break;
3370
default:
3371
pci_disable_msi(ar_pci->pdev);
3372
break;
3373
}
3374
3375
return 0;
3376
}
3377
3378
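/* Poll FW_INDICATOR_ADDRESS for up to ATH10K_PCI_TARGET_WAIT ms. Returns
 * 0 once FW_IND_INITIALIZED is seen, -EIO if the device reads back as
 * 0xffffffff (device gone), -ECOMM if the firmware crashed during init
 * and -ETIMEDOUT otherwise.
 */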
int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3379
{
3380
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3381
unsigned long timeout;
3382
u32 val;
3383
3384
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
3385
3386
timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3387
3388
do {
3389
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3390
3391
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3392
val);
3393
3394
/* target should never return this */
3395
if (val == 0xffffffff)
3396
continue;
3397
3398
/* the device has crashed so don't bother trying anymore */
3399
if (val & FW_IND_EVENT_PENDING)
3400
break;
3401
3402
if (val & FW_IND_INITIALIZED)
3403
break;
3404
3405
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
3406
/* Fix potential race by repeating CORE_BASE writes */
3407
ath10k_pci_enable_legacy_irq(ar);
3408
3409
mdelay(10);
3410
} while (time_before(jiffies, timeout));
3411
3412
ath10k_pci_disable_and_clear_legacy_irq(ar);
3413
ath10k_pci_irq_msi_fw_mask(ar);
3414
3415
if (val == 0xffffffff) {
3416
ath10k_err(ar, "failed to read device register, device is gone\n");
3417
return -EIO;
3418
}
3419
3420
if (val & FW_IND_EVENT_PENDING) {
3421
ath10k_warn(ar, "device has crashed during init\n");
3422
return -ECOMM;
3423
}
3424
3425
if (!(val & FW_IND_INITIALIZED)) {
3426
ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3427
val);
3428
return -ETIMEDOUT;
3429
}
3430
3431
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3432
return 0;
3433
}
3434
3435
static int ath10k_pci_cold_reset(struct ath10k *ar)
3436
{
3437
u32 val;
3438
3439
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3440
3441
spin_lock_bh(&ar->data_lock);
3442
3443
ar->stats.fw_cold_reset_counter++;
3444
3445
spin_unlock_bh(&ar->data_lock);
3446
3447
/* Put Target, including PCIe, into RESET. */
3448
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3449
val |= 1;
3450
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3451
3452
/* After writing into SOC_GLOBAL_RESET to put the device into
* reset and then pulling it out of reset, PCIe may not be stable
* for immediate register accesses and may cause bus errors. Add a
* delay before any PCIe access to avoid this issue.
*/
3457
msleep(20);
3458
3459
/* Pull Target, including PCIe, out of RESET. */
3460
val &= ~1;
3461
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3462
3463
msleep(20);
3464
3465
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3466
3467
return 0;
3468
}
3469
3470
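/* Claim the PCI device: enable it, request the BAR_NUM region, enforce a
 * 32-bit DMA mask, enable bus mastering and iomap the register space into
 * ar_pci->mem.
 */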
static int ath10k_pci_claim(struct ath10k *ar)
3471
{
3472
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3473
struct pci_dev *pdev = ar_pci->pdev;
3474
int ret;
3475
3476
pci_set_drvdata(pdev, ar);
3477
3478
ret = pci_enable_device(pdev);
3479
if (ret) {
3480
ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3481
return ret;
3482
}
3483
3484
ret = pci_request_region(pdev, BAR_NUM, "ath");
3485
if (ret) {
3486
ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3487
ret);
3488
goto err_device;
3489
}
3490
3491
/* Target expects 32 bit DMA. Enforce it. */
3492
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3493
if (ret) {
3494
ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3495
goto err_region;
3496
}
3497
3498
pci_set_master(pdev);
3499
3500
#if defined(__FreeBSD__)
3501
linuxkpi_pcim_want_to_use_bus_functions(pdev);
3502
#endif
3503
3504
/* Arrange for access to Target SoC registers. */
3505
ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3506
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3507
if (!ar_pci->mem) {
3508
ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3509
ret = -EIO;
3510
goto err_region;
3511
}
3512
3513
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3514
return 0;
3515
3516
err_region:
3517
pci_release_region(pdev, BAR_NUM);
3518
3519
err_device:
3520
pci_disable_device(pdev);
3521
3522
return ret;
3523
}
3524
3525
static void ath10k_pci_release(struct ath10k *ar)
3526
{
3527
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3528
struct pci_dev *pdev = ar_pci->pdev;
3529
3530
pci_iounmap(pdev, ar_pci->mem);
3531
pci_release_region(pdev, BAR_NUM);
3532
pci_disable_device(pdev);
3533
}
3534
3535
static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3536
{
3537
const struct ath10k_pci_supp_chip *supp_chip;
3538
int i;
3539
u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3540
3541
for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3542
supp_chip = &ath10k_pci_supp_chips[i];
3543
3544
if (supp_chip->dev_id == dev_id &&
3545
supp_chip->rev_id == rev_id)
3546
return true;
3547
}
3548
3549
return false;
3550
}
3551
3552
int ath10k_pci_setup_resource(struct ath10k *ar)
3553
{
3554
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3555
struct ath10k_ce *ce = ath10k_ce_priv(ar);
3556
int ret;
3557
3558
spin_lock_init(&ce->ce_lock);
3559
spin_lock_init(&ar_pci->ps_lock);
3560
mutex_init(&ar_pci->ce_diag_mutex);
3561
3562
INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3563
3564
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3565
3566
ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
3567
sizeof(pci_host_ce_config_wlan),
3568
GFP_KERNEL);
3569
if (!ar_pci->attr)
3570
return -ENOMEM;
3571
3572
ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
3573
sizeof(pci_target_ce_config_wlan),
3574
GFP_KERNEL);
3575
if (!ar_pci->pipe_config) {
3576
ret = -ENOMEM;
3577
goto err_free_attr;
3578
}
3579
3580
ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
3581
sizeof(pci_target_service_to_ce_map_wlan),
3582
GFP_KERNEL);
3583
if (!ar_pci->serv_to_pipe) {
3584
ret = -ENOMEM;
3585
goto err_free_pipe_config;
3586
}
3587
3588
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3589
ath10k_pci_override_ce_config(ar);
3590
3591
ret = ath10k_pci_alloc_pipes(ar);
3592
if (ret) {
3593
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3594
ret);
3595
goto err_free_serv_to_pipe;
3596
}
3597
3598
return 0;
3599
3600
err_free_serv_to_pipe:
3601
kfree(ar_pci->serv_to_pipe);
3602
err_free_pipe_config:
3603
kfree(ar_pci->pipe_config);
3604
err_free_attr:
3605
kfree(ar_pci->attr);
3606
return ret;
3607
}
3608
3609
void ath10k_pci_release_resource(struct ath10k *ar)
3610
{
3611
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3612
3613
ath10k_pci_rx_retry_sync(ar);
3614
netif_napi_del(&ar->napi);
3615
ath10k_pci_ce_deinit(ar);
3616
ath10k_pci_free_pipes(ar);
3617
kfree(ar_pci->attr);
3618
kfree(ar_pci->pipe_config);
3619
kfree(ar_pci->serv_to_pipe);
3620
}
3621
3622
static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3623
.read32 = ath10k_bus_pci_read32,
3624
.write32 = ath10k_bus_pci_write32,
3625
.get_num_banks = ath10k_pci_get_num_banks,
3626
};
3627
3628
static int ath10k_pci_probe(struct pci_dev *pdev,
3629
const struct pci_device_id *pci_dev)
3630
{
3631
int ret = 0;
3632
struct ath10k *ar;
3633
struct ath10k_pci *ar_pci;
3634
enum ath10k_hw_rev hw_rev;
3635
struct ath10k_bus_params bus_params = {};
3636
bool pci_ps, is_qca988x = false;
3637
int (*pci_soft_reset)(struct ath10k *ar);
3638
int (*pci_hard_reset)(struct ath10k *ar);
3639
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3640
3641
switch (pci_dev->device) {
3642
case QCA988X_2_0_DEVICE_ID_UBNT:
3643
case QCA988X_2_0_DEVICE_ID:
3644
hw_rev = ATH10K_HW_QCA988X;
3645
pci_ps = false;
3646
is_qca988x = true;
3647
pci_soft_reset = ath10k_pci_warm_reset;
3648
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3649
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3650
break;
3651
case QCA9887_1_0_DEVICE_ID:
3652
hw_rev = ATH10K_HW_QCA9887;
3653
pci_ps = false;
3654
pci_soft_reset = ath10k_pci_warm_reset;
3655
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3656
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3657
break;
3658
case QCA6164_2_1_DEVICE_ID:
3659
case QCA6174_2_1_DEVICE_ID:
3660
hw_rev = ATH10K_HW_QCA6174;
3661
pci_ps = true;
3662
pci_soft_reset = ath10k_pci_warm_reset;
3663
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3664
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3665
break;
3666
case QCA99X0_2_0_DEVICE_ID:
3667
hw_rev = ATH10K_HW_QCA99X0;
3668
pci_ps = false;
3669
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3670
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3671
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3672
break;
3673
case QCA9984_1_0_DEVICE_ID:
3674
hw_rev = ATH10K_HW_QCA9984;
3675
pci_ps = false;
3676
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3677
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3678
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3679
break;
3680
case QCA9888_2_0_DEVICE_ID:
3681
hw_rev = ATH10K_HW_QCA9888;
3682
pci_ps = false;
3683
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3684
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3685
targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3686
break;
3687
case QCA9377_1_0_DEVICE_ID:
3688
hw_rev = ATH10K_HW_QCA9377;
3689
pci_ps = true;
3690
pci_soft_reset = ath10k_pci_warm_reset;
3691
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3692
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3693
break;
3694
default:
3695
WARN_ON(1);
3696
return -ENOTSUPP;
3697
}
3698
3699
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3700
hw_rev, &ath10k_pci_hif_ops);
3701
if (!ar) {
3702
dev_err(&pdev->dev, "failed to allocate core\n");
3703
return -ENOMEM;
3704
}
3705
3706
ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3707
pdev->vendor, pdev->device,
3708
pdev->subsystem_vendor, pdev->subsystem_device);
3709
3710
ar_pci = ath10k_pci_priv(ar);
3711
ar_pci->pdev = pdev;
3712
ar_pci->dev = &pdev->dev;
3713
ar_pci->ar = ar;
3714
ar->dev_id = pci_dev->device;
3715
ar_pci->pci_ps = pci_ps;
3716
ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3717
ar_pci->pci_soft_reset = pci_soft_reset;
3718
ar_pci->pci_hard_reset = pci_hard_reset;
3719
ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3720
ar->ce_priv = &ar_pci->ce;
3721
3722
ar->id.vendor = pdev->vendor;
3723
ar->id.device = pdev->device;
3724
ar->id.subsystem_vendor = pdev->subsystem_vendor;
3725
ar->id.subsystem_device = pdev->subsystem_device;
3726
3727
timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3728
3729
ret = ath10k_pci_setup_resource(ar);
3730
if (ret) {
3731
ath10k_err(ar, "failed to setup resource: %d\n", ret);
3732
goto err_core_destroy;
3733
}
3734
3735
ret = ath10k_pci_claim(ar);
3736
if (ret) {
3737
ath10k_err(ar, "failed to claim device: %d\n", ret);
3738
goto err_free_pipes;
3739
}
3740
3741
ret = ath10k_pci_force_wake(ar);
3742
if (ret) {
3743
ath10k_warn(ar, "failed to wake up device : %d\n", ret);
3744
goto err_sleep;
3745
}
3746
3747
ath10k_pci_ce_deinit(ar);
3748
ath10k_pci_irq_disable(ar);
3749
3750
ret = ath10k_pci_init_irq(ar);
3751
if (ret) {
3752
ath10k_err(ar, "failed to init irqs: %d\n", ret);
3753
goto err_sleep;
3754
}
3755
3756
ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3757
ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3758
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3759
3760
ret = ath10k_pci_request_irq(ar);
3761
if (ret) {
3762
ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3763
goto err_deinit_irq;
3764
}
3765
3766
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3767
bus_params.link_can_suspend = true;
3768
/* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
3769
* fall off the bus during chip_reset. These chips have the same pci
3770
* device id as the QCA9880 BR4A or 2R4E. So that's why the check.
3771
*/
3772
if (is_qca988x) {
3773
bus_params.chip_id =
3774
ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3775
if (bus_params.chip_id != 0xffffffff) {
3776
if (!ath10k_pci_chip_is_supported(pdev->device,
3777
bus_params.chip_id)) {
3778
ret = -ENODEV;
3779
goto err_unsupported;
3780
}
3781
}
3782
}
3783
3784
ret = ath10k_pci_chip_reset(ar);
3785
if (ret) {
3786
ath10k_err(ar, "failed to reset chip: %d\n", ret);
3787
goto err_free_irq;
3788
}
3789
3790
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3791
if (bus_params.chip_id == 0xffffffff) {
3792
ret = -ENODEV;
3793
goto err_unsupported;
3794
}
3795
3796
if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
3797
ret = -ENODEV;
3798
goto err_unsupported;
3799
}
3800
3801
ret = ath10k_core_register(ar, &bus_params);
3802
if (ret) {
3803
ath10k_err(ar, "failed to register driver core: %d\n", ret);
3804
goto err_free_irq;
3805
}
3806
3807
return 0;
3808
3809
err_unsupported:
3810
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3811
pdev->device, bus_params.chip_id);
3812
3813
err_free_irq:
3814
ath10k_pci_free_irq(ar);
3815
3816
err_deinit_irq:
3817
ath10k_pci_release_resource(ar);
3818
3819
err_sleep:
3820
ath10k_pci_sleep_sync(ar);
3821
ath10k_pci_release(ar);
3822
3823
err_free_pipes:
3824
ath10k_pci_free_pipes(ar);
3825
3826
err_core_destroy:
3827
ath10k_core_destroy(ar);
3828
3829
return ret;
3830
}
3831
3832
static void ath10k_pci_remove(struct pci_dev *pdev)
3833
{
3834
struct ath10k *ar = pci_get_drvdata(pdev);
3835
3836
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3837
3838
if (!ar)
3839
return;
3840
3841
ath10k_core_unregister(ar);
3842
ath10k_pci_free_irq(ar);
3843
ath10k_pci_deinit_irq(ar);
3844
ath10k_pci_release_resource(ar);
3845
ath10k_pci_sleep_sync(ar);
3846
ath10k_pci_release(ar);
3847
ath10k_core_destroy(ar);
3848
}
3849
3850
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3851
3852
#ifdef CONFIG_PM
3853
static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3854
{
3855
struct ath10k *ar = dev_get_drvdata(dev);
3856
int ret;
3857
3858
ret = ath10k_pci_suspend(ar);
3859
if (ret)
3860
ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3861
3862
return ret;
3863
}
3864
3865
static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3866
{
3867
struct ath10k *ar = dev_get_drvdata(dev);
3868
int ret;
3869
3870
ret = ath10k_pci_resume(ar);
3871
if (ret)
3872
ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3873
3874
return ret;
3875
}
3876
3877
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3878
ath10k_pci_pm_suspend,
3879
ath10k_pci_pm_resume);
3880
#endif
3881
3882
static struct pci_driver ath10k_pci_driver = {
    .name = "ath10k_pci",
    .id_table = ath10k_pci_id_table,
    .probe = ath10k_pci_probe,
    .remove = ath10k_pci_remove,
#ifdef CONFIG_PM
    .driver.pm = &ath10k_pci_pm_ops,
#endif
#if defined(__FreeBSD__)
    .bsddriver.name = KBUILD_MODNAME,
    /* Allow a possible native driver to attach. */
    .bsd_probe_return = (BUS_PROBE_DEFAULT - 1),
#endif
};
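/* Module entry point: register both the PCI driver and the AHB driver and
 * succeed as long as at least one of the two registrations worked.
 */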
static int __init ath10k_pci_init(void)
{
    int ret1, ret2;

    ret1 = pci_register_driver(&ath10k_pci_driver);
    if (ret1)
        printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
               ret1);

    ret2 = ath10k_ahb_init();
    if (ret2)
        printk(KERN_ERR "ahb init failed: %d\n", ret2);

    if (ret1 && ret2)
        return ret1;

    /* registered to at least one bus */
    return 0;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
    pci_unregister_driver(&ath10k_pci_driver);
    ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
3927
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3928
MODULE_LICENSE("Dual BSD/GPL");
3929
#if defined(__FreeBSD__)
3930
MODULE_VERSION(ath10k_pci, 1);
3931
MODULE_DEPEND(ath10k_pci, linuxkpi, 1, 1, 1);
3932
MODULE_DEPEND(ath10k_pci, linuxkpi_wlan, 1, 1, 1);
3933
MODULE_DEPEND(ath10k_pci, athk_common, 1, 1, 1);
3934
#ifdef CONFIG_ATH10K_DEBUGFS
3935
MODULE_DEPEND(ath10k_pci, debugfs, 1, 1, 1);
3936
#endif
3937
#endif
3938
3939
/* QCA988x 2.0 firmware files */
3940
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3941
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3942
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3943
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3944
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3945
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3946
3947
/* QCA9887 1.0 firmware files */
3948
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3949
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3950
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3951
3952
/* QCA6174 2.1 firmware files */
3953
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3954
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3955
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3956
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3957
3958
/* QCA6174 3.1 firmware files */
3959
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3960
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3961
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3962
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3963
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3964
3965
/* QCA9377 1.0 firmware files */
3966
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3967
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3968
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
3969
3970