GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath11k/ahb.c
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include "qmi.h"
#include <linux/remoteproc.h>
#include "pcic.h"
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

static const struct of_device_id ath11k_ahb_of_match[] = {
	/* TODO: Should we change the compatible string to something similar
	 * to one that ath10k uses?
	 */
	{ .compatible = "qcom,ipq8074-wifi",
	  .data = (void *)ATH11K_HW_IPQ8074,
	},
	{ .compatible = "qcom,ipq6018-wifi",
	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
	},
	{ .compatible = "qcom,wcn6750-wifi",
	  .data = (void *)ATH11K_HW_WCN6750_HW10,
	},
	{ .compatible = "qcom,ipq5018-wifi",
	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);

#define ATH11K_IRQ_CE0_OFFSET 4

static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.msi.irqs[vector];
}

static inline u32
ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
{
	u32 window_start = 0;

	/* If offset lies within DP register range, use 1st window */
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
		window_start = ATH11K_PCI_WINDOW_START;
	/* If offset lies within CE register range, use 2nd window */
	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
		 ATH11K_PCI_WINDOW_RANGE_MASK)
		window_start = 2 * ATH11K_PCI_WINDOW_START;

	return window_start;
}

static void
ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
{
	u32 window_start;

	/* WCN6750 uses static window based register access */
	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);

	iowrite32(value, ab->mem + window_start +
		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}

static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
{
	u32 window_start;
	u32 val;

	/* WCN6750 uses static window based register access */
	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);

	val = ioread32(ab->mem + window_start +
		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	return val;
}

static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
	.wakeup = NULL,
	.release = NULL,
	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
	.window_write32 = ath11k_ahb_window_write32_wcn6750,
	.window_read32 = ath11k_ahb_window_read32_wcn6750,
};

static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
	return ioread32(ab->mem + offset);
}

static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	iowrite32(value, ab->mem + offset);
}

static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_ahb_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath11k_ahb_read32(ab, offset);
	ath11k_ahb_write32(ab, offset, val | BIT(bit));
}

static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath11k_ahb_read32(ab, offset);
	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
}

static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}

static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}

static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j;
	int irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_ahb_ce_irq_enable(ab, i);
	}
}

static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_ahb_ce_irq_disable(ab, i);
	}
}

static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}

static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_ahb_ext_grp_enable(irq_grp);
	}
}

static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}

static void ath11k_ahb_stop(struct ath11k_base *ab)
{
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath11k_ahb_ce_irqs_disable(ab);
	ath11k_ahb_sync_ce_irqs(ab);
	ath11k_ahb_kill_tasklets(ab);
	timer_delete_sync(&ab->rx_replenish_retry);
	ath11k_ce_cleanup_pipes(ab);
}

static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	int ret;

	ret = rproc_boot(ab_ahb->tgt_rproc);
	if (ret)
		ath11k_err(ab, "failed to boot the remote processor Q6\n");

	return ret;
}

static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	rproc_shutdown(ab_ahb->tgt_rproc);
}

static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}

static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
		free_netdev(irq_grp->napi_ndev);
	}
}

static void ath11k_ahb_free_irq(struct ath11k_base *ab)
{
	int irq_idx;
	int i;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_free_irq(ab);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_ahb_free_ext_irq(ab);
}

static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath11k_ext_irq_grp,
							  napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath11k_ahb_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath11k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
	struct ath11k_hw_params *hw = &ab->hw_params;
	int i, j;
	int irq;
	int ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;

		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev)
			return -ENOMEM;

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_ahb_ext_grp_napi_poll);

		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			if (j < ab->hw_params.max_radios) {
				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						rxdma2host_destination_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						host2rxdma_host_buf_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						ppdu_end_interrupts_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
					irq_grp->irqs[num_irq++] =
						rxdma2host_monitor_status_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}
			}
		}
		irq_grp->num_irq = num_irq;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
					  IRQF_TRIGGER_RISING,
					  irq_name[irq_idx], irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request_irq for %d\n",
					   irq);
			}
		}
	}

	return 0;
}

static int ath11k_ahb_config_irq(struct ath11k_base *ab)
{
	int irq, irq_idx, i;
	int ret;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_config_irq(ab);

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
				  ce_pipe);
		if (ret)
			return ret;

		ab->irq_num[irq_idx] = irq;
	}

	/* Configure external interrupts */
	ret = ath11k_ahb_config_ext_irq(ab);

	return ret;
}

static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	u32 wake_irq;
	u32 value = 0;
	int ret;

	if (!device_may_wakeup(ab->dev))
		return -EPERM;

	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];

	ret = enable_irq_wake(wake_irq);
	if (ret) {
		ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
		return ret;
	}

	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
				 ATH11K_AHB_SMP2P_SMEM_MSG);

	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
	if (ret) {
		ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
		return ret;
	}

	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");

	return ret;
}

static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	u32 wake_irq;
	u32 value = 0;
	int ret;

	if (!device_may_wakeup(ab->dev))
		return -EPERM;

	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];

	ret = disable_irq_wake(wake_irq);
	if (ret) {
		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
		return ret;
	}

	reinit_completion(&ab->wow.wakeup_completed);

	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
				 ATH11K_AHB_SMP2P_SMEM_MSG);

	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
	if (ret) {
		ath11k_err(ab, "failed to send smp2p power save exit cmd :%d\n", ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");

	return 0;
}

static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
	.start = ath11k_ahb_start,
	.stop = ath11k_ahb_stop,
	.read32 = ath11k_ahb_read32,
	.write32 = ath11k_ahb_write32,
	.read = NULL,
	.irq_enable = ath11k_ahb_ext_irq_enable,
	.irq_disable = ath11k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
};

static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
	.start = ath11k_pcic_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.read = NULL,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address = ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
	.suspend = ath11k_ahb_hif_suspend,
	.resume = ath11k_ahb_hif_resume,
	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};

static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
	struct rproc *prproc;
	phandle rproc_phandle;

	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
		ath11k_err(ab, "failed to get q6_rproc handle\n");
		return -ENOENT;
	}

	prproc = rproc_get_by_phandle(rproc_phandle);
	if (!prproc) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
		return -EPROBE_DEFER;
	}
	ab_ahb->tgt_rproc = prproc;

	return 0;
}

static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	phys_addr_t msi_addr_pa;
	dma_addr_t msi_addr_iova;
	struct resource *res;
	int int_prop;
	int ret;
	int i;

	ret = ath11k_pcic_init_msi_config(ab);
	if (ret) {
		ath11k_err(ab, "failed to init msi config: %d\n", ret);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath11k_err(ab, "failed to fetch msi_addr\n");
		return -ENOENT;
	}

	msi_addr_pa = res->start;
	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
					 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(ab->dev, msi_addr_iova))
		return -ENOMEM;

	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);

	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
	if (ret)
		return ret;

	ab->pci.msi.ep_base_data = int_prop + 32;

	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			return ret;

		ab->pci.msi.irqs[i] = ret;
	}

	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);

	return 0;
}

static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	if (!ab->hw_params.smp2p_wow_exit)
		return 0;

	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
							    &ab_ahb->smp2p_info.smem_bit);
	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
		ath11k_err(ab, "failed to fetch smem state: %ld\n",
			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
	}

	return 0;
}

static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	if (!ab->hw_params.smp2p_wow_exit)
		return;

	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
}

static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	void __iomem *mem;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_ahb_setup_msi_resources(ab);

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(mem)) {
		dev_err(&pdev->dev, "ioremap error\n");
		return PTR_ERR(mem);
	}

	ab->mem = mem;
	ab->mem_len = resource_size(mem_res);

	return 0;
}

static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
	struct resource r;
	int ret;

	ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
	if (ret) {
		dev_err(dev, "failed to resolve msa fixed region\n");
		return ret;
	}

	ab_ahb->fw.msa_paddr = r.start;
	ab_ahb->fw.msa_size = resource_size(&r);

	ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
	if (ret) {
		dev_err(dev, "failed to resolve ce fixed region\n");
		return ret;
	}

	ab_ahb->fw.ce_paddr = r.start;
	ab_ahb->fw.ce_size = resource_size(&r);

	return 0;
}

static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
{
	const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
	struct platform_device *pdev = ab->pdev;

	if (!ce_remap) {
		/* no separate CE register space */
		ab->mem_ce = ab->mem;
		return 0;
	}

	/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
	 * and the space is not contiguous, hence remapping the CE registers
	 * to a new space for accessing them.
	 */
	ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
	if (!ab->mem_ce) {
		dev_err(&pdev->dev, "ce ioremap error\n");
		return -ENOMEM;
	}

	return 0;
}

static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
{
	if (ab->hw_params.ce_remap)
		iounmap(ab->mem_ce);
}

static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *host_dev = ab->dev;
	struct platform_device_info info = {};
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	/* Chipsets not requiring MSA need not initialize
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		ab_ahb->fw.use_tz = true;
		return 0;
	}

	ret = ath11k_ahb_setup_msa_resources(ab);
	if (ret) {
		ath11k_err(ab, "failed to setup msa resources\n");
		return ret;
	}

	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath11k_err(ab, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ab_ahb->fw.dev = &pdev->dev;

	iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
	if (IS_ERR(iommu_dom)) {
		ath11k_err(ab, "failed to allocate iommu domain\n");
		ret = PTR_ERR(iommu_dom);
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
	if (ret) {
		ath11k_err(ab, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
		goto err_iommu_unmap;
	}

	ab_ahb->fw.use_tz = false;
	ab_ahb->fw.iommu_domain = iommu_dom;
	of_node_put(node);

	return 0;

err_iommu_unmap:
	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);

err_iommu_detach:
	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}

static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct iommu_domain *iommu;
	size_t unmapped_size;

	/* Chipsets not requiring MSA would have not initialized
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	if (ab_ahb->fw.use_tz)
		return 0;

	iommu = ab_ahb->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
	if (unmapped_size != ab_ahb->fw.msa_size)
		ath11k_err(ab, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
	if (unmapped_size != ab_ahb->fw.ce_size)
		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ab_ahb->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));

	return 0;
}

static int ath11k_ahb_probe(struct platform_device *pdev)
{
	struct ath11k_base *ab;
	const struct ath11k_hif_ops *hif_ops;
	const struct ath11k_pci_ops *pci_ops;
	enum ath11k_hw_rev hw_rev;
	int ret;

	hw_rev = (uintptr_t)device_get_match_data(&pdev->dev);

	switch (hw_rev) {
	case ATH11K_HW_IPQ8074:
	case ATH11K_HW_IPQ6018_HW10:
	case ATH11K_HW_IPQ5018_HW10:
		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
		pci_ops = NULL;
		break;
	case ATH11K_HW_WCN6750_HW10:
		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
		break;
	default:
		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
		return -EOPNOTSUPP;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
		return ret;
	}

	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
			       ATH11K_BUS_AHB);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	platform_set_drvdata(pdev, ab);

	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
	if (ret) {
		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
		goto err_core_free;
	}

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_setup_resources(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_ce_remap(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_fw_resources_init(ab);
	if (ret)
		goto err_ce_unmap;

	ret = ath11k_ahb_setup_smp2p_handle(ab);
	if (ret)
		goto err_fw_deinit;

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_release_smp2p_handle;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_ahb_init_qmi_ce_config(ab);

	ret = ath11k_core_get_rproc(ab);
	if (ret) {
		ath11k_err(ab, "failed to get rproc: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_ahb_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_ce_free;
	}

	ath11k_qmi_fwreset_from_cold_boot(ab);

	return 0;

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_release_smp2p_handle:
	ath11k_ahb_release_smp2p_handle(ab);

err_fw_deinit:
	ath11k_ahb_fw_resource_deinit(ab);

err_ce_unmap:
	ath11k_ahb_ce_unmap(ab);

err_core_free:
	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}

static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
	unsigned long left;

	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
		left = wait_for_completion_timeout(&ab->driver_recovery,
						   ATH11K_AHB_RECOVERY_TIMEOUT);
		if (!left)
			ath11k_warn(ab, "failed to receive recovery response completion\n");
	}

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
	cancel_work_sync(&ab->restart_work);
	cancel_work_sync(&ab->qmi.event_work);
}

static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath11k_ahb_free_irq(ab);
	ath11k_hal_srng_deinit(ab);
	ath11k_ahb_release_smp2p_handle(ab);
	ath11k_ahb_fw_resource_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_ahb_ce_unmap(ab);

	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}

static void ath11k_ahb_remove(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_ahb_power_down(ab, false);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	ath11k_ahb_remove_prepare(ab);
	ath11k_core_deinit(ab);

qmi_fail:
	ath11k_fw_destroy(ab);
	ath11k_ahb_free_resources(ab);
}

static void ath11k_ahb_shutdown(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);

	/* platform shutdown() & remove() are mutually exclusive.
	 * remove() is invoked during rmmod & shutdown() during
	 * system reboot/shutdown.
	 */
	ath11k_ahb_remove_prepare(ab);

	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
		goto free_resources;

	ath11k_core_deinit(ab);

free_resources:
	ath11k_fw_destroy(ab);
	ath11k_ahb_free_resources(ab);
}

static struct platform_driver ath11k_ahb_driver = {
	.driver = {
		.name = "ath11k",
		.of_match_table = ath11k_ahb_of_match,
	},
	.probe = ath11k_ahb_probe,
	.remove = ath11k_ahb_remove,
	.shutdown = ath11k_ahb_shutdown,
};

module_platform_driver(ath11k_ahb_driver);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");