Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath10k/snoc.c
106597 views
1
// SPDX-License-Identifier: ISC
2
/*
3
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
4
*/
5
6
#include <linux/bits.h>
7
#include <linux/clk.h>
8
#include <linux/kernel.h>
9
#include <linux/module.h>
10
#include <linux/of.h>
11
#include <linux/of_device.h>
12
#include <linux/platform_device.h>
13
#include <linux/property.h>
14
#include <linux/regulator/consumer.h>
15
#include <linux/remoteproc/qcom_rproc.h>
16
#include <linux/of_reserved_mem.h>
17
#include <linux/iommu.h>
18
19
#include "ce.h"
20
#include "coredump.h"
21
#include "debug.h"
22
#include "hif.h"
23
#include "htc.h"
24
#include "snoc.h"
25
26
/* Delay (ms) before retrying a failed rx buffer post (see rx_post_retry timer). */
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
/* CE pipe serviced by polling: CE4 (host->target HTT) has interrupts disabled. */
#define CE_POLL_PIPE 4
/* Index of the CE interrupt line armed as the system wakeup source. */
#define ATH10K_SNOC_WAKE_IRQ 2
29
30
/* IRQ resource names for the 12 WCN3990 copy engines, indexed by CE id. */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
44
45
/* Regulator supply names consumed by the WLAN hardware (bulk-enabled on power on). */
static const char * const ath10k_regulators[] = {
	"vdd-0.8-cx-mx",
	"vdd-1.8-xo",
	"vdd-1.3-rfa",
	"vdd-3.3-ch0",
	"vdd-3.3-ch1",
};
52
53
/* Clock names consumed by the WLAN hardware (bulk prepare/enabled on power on). */
static const char * const ath10k_clocks[] = {
	"cxo_ref_clk_pin", "qdss",
};
56
57
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
58
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
59
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
60
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
61
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
62
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
63
64
/* Platform match data: WCN3990 hw revision, 35-bit DMA addressing, 1 MiB MSA. */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};
69
70
/* Offsets of the source/destination ring write-index shadow registers
 * within each CE's register block.
 */
#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
#define WCN3990_DST_WR_IDX_OFFSET 0x40
72
73
/* Shadow register map handed to firmware via QMI (see ath10k_snoc_wlan_enable):
 * pairs each CE id with its write-index register offset -- source-ring offset
 * for host->target CEs, destination-ring offset for target->host CEs.
 */
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{
		.ce_id = __cpu_to_le16(0),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(3),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(4),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(5),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(7),
		.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(1),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(2),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	/* NOTE(review): CE7 appears in both the SRC and DST lists -- it is the
	 * bidirectional diag CE, so both ring indices are shadowed.
	 */
	{
		.ce_id = __cpu_to_le16(7),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(8),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(9),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(10),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},

	{
		.ce_id = __cpu_to_le16(11),
		.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
	},
};
134
135
/* Host-side attributes for each of the 12 copy engines: ring sizes, maximum
 * transfer size and the send/recv completion callbacks wired up per pipe.
 */
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT; interrupts disabled, serviced by polling
	 * (CE_POLL_PIPE) from the rx callbacks.
	 */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target ) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};
241
242
/* Target-side CE pipe configuration; copied into the QMI wlan-enable request
 * in ath10k_snoc_wlan_enable(). All fields are little-endian on the wire.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		/* NOTE(review): raw 4 rather than a PIPEDIR_* constant --
		 * presumably a host-only direction code; confirm against hw.h.
		 */
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};
363
364
/* Map of HTC service id -> (pipe direction, CE pipe number), sent to firmware
 * via QMI and also walked by ath10k_snoc_hif_map_service_to_pipe(). The list
 * is terminated by an all-zero entry.
 */
static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
473
474
/* Write a 32-bit @value into the SNOC register space at @offset. */
static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
	iowrite32(value, ath10k_snoc_priv(ar)->mem + offset);
}
480
481
/* Read a 32-bit value from the SNOC register space at @offset. */
static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	return ioread32(ar_snoc->mem + offset);
}
490
491
/* Allocate, DMA-map and post a single rx buffer to @pipe's dest ring.
 *
 * Returns 0 on success, -ENOMEM if skb allocation fails, -EIO if DMA
 * mapping fails, or the CE post error (e.g. -ENOSPC when the ring is full).
 * On any failure the skb is freed and unmapped here; on success ownership
 * passes to the CE dest ring.
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* CE hardware expects 4-byte-aligned buffers; flag (but tolerate)
	 * anything else.
	 */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the DMA address in the skb cb so the rx completion path
	 * can unmap it.
	 */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	/* ce_rx_post_buf must be called under the CE lock. */
	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
529
530
/* Fill all free slots of @pipe's dest ring with rx buffers. On a transient
 * post failure (other than -ENOSPC) the rx_post_retry timer is armed so the
 * fill is retried after ATH10K_SNOC_RX_POST_RETRY_MS.
 */
static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	/* Pipes with no rx buffer size or no dest ring never receive. */
	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	/* Snapshot the free-slot count under the CE lock; individual posts
	 * below re-take the lock per buffer.
	 */
	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}
559
560
static void ath10k_snoc_rx_post(struct ath10k *ar)
561
{
562
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
563
int i;
564
565
for (i = 0; i < CE_COUNT; i++)
566
ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
567
}
568
569
/* Common rx completion path: drain all completed buffers from @ce_state,
 * unmap them, deliver each via @callback, then refill the pipe.
 *
 * Buffers are first collected on a local list so that delivery (which may
 * take other locks) happens outside the CE completion walk.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* max_nbytes is the size that was DMA-mapped at post time. */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Firmware wrote past the buffer we gave it -- drop the skb
		 * rather than exposing a corrupt length.
		 */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	/* Re-arm the pipe with fresh buffers for everything we consumed. */
	ath10k_snoc_rx_post_pipe(pipe_info);
}
609
610
/* CE recv callback: deliver target->host buffers to the HTC layer. */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
614
615
/* CE recv callback for pipes carrying mixed HTT+HTC traffic. */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
624
625
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
632
633
/* Strip the HTC header and hand the remaining HTT message to the
 * target-to-host HTT handler. Takes ownership of @skb.
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
638
639
/* CE recv callback for the HTT rx pipe; also polls CE4 (CE_POLL_PIPE),
 * whose interrupts are disabled, before delivering to HTT.
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
644
645
/* rx_post_retry timer callback: retry filling all rx pipes after an earlier
 * buffer post failed (armed in ath10k_snoc_rx_post_pipe()).
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = timer_container_of(ar_snoc, t,
							 rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
653
654
/* CE send-completion callback for HTC tx pipes: collect completed skbs and
 * hand them to the HTC tx completion handler (outside the CE walk).
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* NULL contexts come from gather fragments -- skip them. */
		if (!skb)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
671
672
/* CE send-completion callback for the HTT tx pipe: unmap each completed
 * skb and notify the HTT layer.
 */
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}
686
687
/* HIF tx scatter-gather: send @n_items fragments on @pipe_id as one CE
 * transfer. All but the last fragment carry CE_SEND_FLAG_GATHER; the final
 * send (flags 0) commits the whole chain.
 *
 * The entire sequence runs under the CE lock so the gather chain cannot be
 * interleaved with other senders. On failure every descriptor queued so far
 * is reverted before returning the error.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Last fragment: no GATHER flag, this triggers the actual send. */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* Undo the i descriptors already queued for this gather chain. */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
739
740
/* Report static target identification for WCN3990 (no BMI probe on SNOC).
 * Always succeeds.
 */
static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}
748
749
/* Return the number of free source-ring entries on @pipe's copy engine. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
757
758
/* Service send completions on @pipe. Unless @force, skip the (expensive)
 * service call while more than half of the source ring is still free.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
773
774
/* Look up the uplink and downlink CE pipe numbers for @service_id in
 * target_service_to_ce_map_wlan.
 *
 * Returns 0 with *ul_pipe/*dl_pipe set, or -ENOENT if the service has no
 * entry for one of the directions. WARNs if a direction is mapped twice.
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct ce_service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}
819
820
/* Resolve the default (HTC control service) ul/dl pipes. The lookup cannot
 * fail for RSVD_CTRL, hence the ignored return value.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						  ATH10K_HTC_SVC_ID_RSVD_CTRL,
						  ul_pipe, dl_pipe);
}
829
830
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
831
{
832
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
833
int id;
834
835
for (id = 0; id < CE_COUNT_MAX; id++)
836
disable_irq(ar_snoc->ce_irqs[id].irq_line);
837
}
838
839
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
840
{
841
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
842
int id;
843
844
for (id = 0; id < CE_COUNT_MAX; id++)
845
enable_irq(ar_snoc->ce_irqs[id].irq_line);
846
}
847
848
/* Free every rx buffer still posted on @snoc_pipe's dest ring: unmap the
 * DMA mapping made at post time and drop the skb.
 */
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	/* Nothing posted on pipes without a dest ring or rx buffer size. */
	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
879
880
/* Flush every tx buffer still pending on @snoc_pipe's src ring by running
 * it through the normal HTC tx completion path (which frees/unmaps it).
 */
static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
908
909
/* Stop the rx replenish timer and release all rx/tx buffers on every pipe.
 * Called from the HIF stop path.
 */
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	/* Kill the retry timer first so it cannot repost buffers we free. */
	timer_delete_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}
922
923
/* HIF stop: mask CE interrupts (unless crash-flushing, where they are
 * already being handled), quiesce NAPI, then free all pipe buffers.
 */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	ath10k_core_napi_sync_disable(ar);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
932
933
/* HIF start: reset pending-CE-IRQ state, enable NAPI, unmask CE interrupts
 * (unless recovering from a firmware crash, where they stayed enabled) and
 * prime the rx rings. Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);

	netif_threaded_enable(ar->napi_dev);
	ath10k_core_napi_enable(ar);
	/* IRQs are left enabled when we restart due to a firmware crash */
	if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
952
953
static int ath10k_snoc_init_pipes(struct ath10k *ar)
954
{
955
int i, ret;
956
957
for (i = 0; i < CE_COUNT; i++) {
958
ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
959
if (ret) {
960
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
961
i, ret);
962
return ret;
963
}
964
}
965
966
return 0;
967
}
968
969
static int ath10k_snoc_wlan_enable(struct ath10k *ar,
970
enum ath10k_firmware_mode fw_mode)
971
{
972
struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
973
struct ath10k_qmi_wlan_enable_cfg cfg;
974
enum wlfw_driver_mode_enum_v01 mode;
975
int pipe_num;
976
977
for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
978
tgt_cfg[pipe_num].pipe_num =
979
target_ce_config_wlan[pipe_num].pipenum;
980
tgt_cfg[pipe_num].pipe_dir =
981
target_ce_config_wlan[pipe_num].pipedir;
982
tgt_cfg[pipe_num].nentries =
983
target_ce_config_wlan[pipe_num].nentries;
984
tgt_cfg[pipe_num].nbytes_max =
985
target_ce_config_wlan[pipe_num].nbytes_max;
986
tgt_cfg[pipe_num].flags =
987
target_ce_config_wlan[pipe_num].flags;
988
tgt_cfg[pipe_num].reserved = 0;
989
}
990
991
cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
992
sizeof(struct ath10k_tgt_pipe_cfg);
993
cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
994
&tgt_cfg;
995
cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
996
sizeof(struct ath10k_svc_pipe_cfg);
997
cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
998
&target_service_to_ce_map_wlan;
999
cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
1000
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
1001
&target_shadow_reg_cfg_map;
1002
1003
switch (fw_mode) {
1004
case ATH10K_FIRMWARE_MODE_NORMAL:
1005
mode = QMI_WLFW_MISSION_V01;
1006
break;
1007
case ATH10K_FIRMWARE_MODE_UTF:
1008
mode = QMI_WLFW_FTM_V01;
1009
break;
1010
default:
1011
ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
1012
return -EINVAL;
1013
}
1014
1015
return ath10k_qmi_wlan_enable(ar, &cfg, mode,
1016
NULL);
1017
}
1018
1019
/* Power on the WLAN hardware: enable all regulators, then all clocks.
 * On clock failure the regulators are rolled back. Returns 0 or an errno.
 */
static int ath10k_hw_power_on(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");

	ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
	if (ret)
		goto vreg_off;

	return ret;

vreg_off:
	regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
	return ret;
}
1040
1041
/* Power off the WLAN hardware: clocks first, then regulators (reverse of
 * power-on order). Returns the regulator-disable status.
 */
static int ath10k_hw_power_off(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");

	clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);

	return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
}
1051
1052
/* Send the QMI wlan-disable request unless a crash-triggered recovery is
 * already tearing the firmware down.
 */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	/* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
	 * flags are not set, it means that the driver has restarted
	 * due to a crash inject via debugfs. In this case, the driver
	 * needs to restart the firmware and hence send qmi wlan disable,
	 * during the driver restart sequence.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}
1066
1067
/* HIF power down: disable WLAN via QMI, free the CE RRI area, then cut
 * clocks and regulators (reverse of ath10k_snoc_hif_power_up()).
 */
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
	ath10k_hw_power_off(ar);
}
1075
1076
/* HIF power up: power rails on, QMI wlan-enable in @fw_mode, allocate the
 * CE RRI area, initialize all CE pipes and finally enable CE interrupts.
 * Unwinds each completed step on failure; returns 0 or an errno.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		return ret;
	}

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		goto err_hw_power_off;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_free_rri;
	}

	ath10k_ce_enable_interrupts(ar);

	return 0;

err_free_rri:
	ath10k_ce_free_rri(ar);
	ath10k_snoc_wlan_disable(ar);

err_hw_power_off:
	ath10k_hw_power_off(ar);

	return ret;
}
1117
1118
static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
1119
u8 fw_log_mode)
1120
{
1121
u8 fw_dbg_mode;
1122
1123
if (fw_log_mode)
1124
fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
1125
else
1126
fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
1127
1128
return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
1129
}
1130
1131
#ifdef CONFIG_PM
1132
/* System suspend hook: arm the designated CE interrupt as a wakeup source.
 * Returns -EPERM when the device is not wakeup-capable, otherwise the
 * enable_irq_wake() status.
 */
static int ath10k_snoc_hif_suspend(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");

	return ret;
}
1150
1151
/* System resume hook: disarm the wakeup CE interrupt set up in suspend.
 * Returns -EPERM when the device is not wakeup-capable, otherwise the
 * disable_irq_wake() status.
 */
static int ath10k_snoc_hif_resume(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");

	return ret;
}
1169
#endif
1170
1171
/* HIF operations exposed by the SNOC bus to the ath10k core. */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32 = ath10k_snoc_read32,
	.write32 = ath10k_snoc_write32,
	.start = ath10k_snoc_hif_start,
	.stop = ath10k_snoc_hif_stop,
	.map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_snoc_hif_get_default_pipe,
	.power_up = ath10k_snoc_hif_power_up,
	.power_down = ath10k_snoc_hif_power_down,
	.tx_sg = ath10k_snoc_hif_tx_sg,
	.send_complete_check = ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
	.get_target_info = ath10k_snoc_hif_get_target_info,
	.set_target_log_mode = ath10k_snoc_hif_set_target_log_mode,

#ifdef CONFIG_PM
	.suspend = ath10k_snoc_hif_suspend,
	.resume = ath10k_snoc_hif_resume,
#endif
};
1191
1192
/* Bus-level register accessors used by the shared copy engine code. */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32 = ath10k_snoc_read32,
	.write32 = ath10k_snoc_write32,
};
1197
static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1198
{
1199
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1200
int i;
1201
1202
for (i = 0; i < CE_COUNT_MAX; i++) {
1203
if (ar_snoc->ce_irqs[i].irq_line == irq)
1204
return i;
1205
}
1206
ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1207
1208
return -EINVAL;
1209
}
1210
1211
/* Per-CE interrupt handler. Masks the CE's interrupt, records it as
 * pending, and defers the actual servicing to NAPI context. Always
 * returns IRQ_HANDLED, even for an unrecognized IRQ (which is only
 * warned about).
 */
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	/* Disable first so the CE cannot re-fire before NAPI runs;
	 * ath10k_snoc_napi_poll() re-enables it after servicing.
	 */
	ath10k_ce_disable_interrupt(ar, ce_id);
	set_bit(ce_id, ar_snoc->pending_ce_irqs);

	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
1231
/* NAPI poll callback: service every CE whose interrupt fired since the
 * last poll, then process HTT tx/rx completions up to @budget.
 * Returns the amount of work done; completes NAPI when under budget
 * or when a firmware crash flush is in progress.
 */
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int done = 0;
	int ce_id;

	/* Firmware crashed: stop polling, pending work is being flushed. */
	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
		napi_complete(ctx);
		return done;
	}

	/* Service and re-arm only the CEs flagged by the IRQ handler. */
	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
			ath10k_ce_per_engine_service(ar, ce_id);
			ath10k_ce_enable_interrupt(ar, ce_id);
		}

	done = ath10k_htt_txrx_compl_task(ar, budget);

	/* done == budget means there may be more work; stay scheduled. */
	if (done < budget)
		napi_complete(ctx);

	return done;
}
1257
/* Register the NAPI poll callback on the core's dummy netdev. */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
1262
static int ath10k_snoc_request_irq(struct ath10k *ar)
1263
{
1264
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1265
int ret, id;
1266
1267
for (id = 0; id < CE_COUNT_MAX; id++) {
1268
ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1269
ath10k_snoc_per_engine_handler,
1270
IRQF_NO_AUTOEN, ce_name[id], ar);
1271
if (ret) {
1272
ath10k_err(ar,
1273
"failed to register IRQ handler for CE %d: %d\n",
1274
id, ret);
1275
goto err_irq;
1276
}
1277
}
1278
1279
return 0;
1280
1281
err_irq:
1282
for (id -= 1; id >= 0; id--)
1283
free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1284
1285
return ret;
1286
}
1287
1288
static void ath10k_snoc_free_irq(struct ath10k *ar)
1289
{
1290
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1291
int id;
1292
1293
for (id = 0; id < CE_COUNT_MAX; id++)
1294
free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1295
}
1296
1297
/* Pull platform resources out of the device tree: map the "membase"
 * register region, record one IRQ per copy engine, and read the
 * optional "qcom,xo-cal-data" calibration property.
 * Returns 0 on success or a negative errno.
 */
static int ath10k_snoc_resource_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int i, ret = 0;

	pdev = ar_snoc->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
	if (!res) {
		ath10k_err(ar, "Memory base not found in DT\n");
		return -EINVAL;
	}

	/* devm-managed mapping; unmapped automatically on driver detach. */
	ar_snoc->mem_pa = res->start;
	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
				    resource_size(res));
	if (!ar_snoc->mem) {
		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
			   &ar_snoc->mem_pa);
		return -EINVAL;
	}

	for (i = 0; i < CE_COUNT; i++) {
		ret = platform_get_irq(ar_snoc->dev, i);
		if (ret < 0)
			return ret;
		ar_snoc->ce_irqs[i].irq_line = ret;
	}

	/* Optional property: absence is not an error, just leaves
	 * xo_cal_supported false.
	 */
	ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
				       &ar_snoc->xo_cal_data);
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
	if (ret == 0) {
		ar_snoc->xo_cal_supported = true;
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
			   ar_snoc->xo_cal_data);
	}

	return 0;
}
1339
/* Apply board-specific DT quirks: an optional firmware/board name
 * override and the 8-bit host capability workaround flag.
 */
static void ath10k_snoc_quirks_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct device *dev = &ar_snoc->dev->dev;

	/* ignore errors, keep NULL if there is no property */
	of_property_read_string(dev->of_node, "firmware-name", &ar->board_name);

	if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
		set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
}
1351
/* Handle firmware state indications delivered by the QMI layer.
 * FW_READY either registers the core (first boot) or kicks off
 * recovery (subsequent boots); FW_DOWN flags the crash so in-flight
 * work is flushed. Indications are ignored while unregistering.
 * Returns 0, or a negative errno on registration failure / unknown type.
 */
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params = {};
	int ret;

	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		/* Already registered => this is a restart after crash. */
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			ath10k_core_start_recovery(ar);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}
1389
/* Allocate the copy engine pipes and initialize per-pipe state, the
 * rx replenish retry timer, and NAPI. Returns 0 or the error from
 * pipe allocation. NOTE(review): pipes allocated before a failing
 * iteration are freed later by ath10k_snoc_release_resource() on the
 * caller's error path, not here.
 */
static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *pipe;
	int i, ret;

	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
	spin_lock_init(&ce->ce_lock);
	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_snoc->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
	}
	ath10k_snoc_init_napi(ar);

	return 0;
}
1418
static void ath10k_snoc_release_resource(struct ath10k *ar)
1419
{
1420
int i;
1421
1422
netif_napi_del(&ar->napi);
1423
for (i = 0; i < CE_COUNT; i++)
1424
ath10k_ce_free_pipe(ar, i);
1425
}
1426
1427
/* Copy the MSA firmware memory region into the coredump ramdump
 * buffer, prefixed with an ath10k_dump_ram_data_hdr describing it.
 * Silently does nothing without a ramdump buffer or memory layout.
 */
static void ath10k_msa_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	size_t buf_len;
	u8 *buf;

	if (!crash_data || !crash_data->ramdump_buf)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	/* Only the first region of the layout table is dumped here. */
	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	memset(buf, 0, buf_len);

	/* Reserve space for the header. */
	hdr = (void *)buf;
	buf += sizeof(*hdr);
	buf_len -= sizeof(*hdr);

	hdr->region_type = cpu_to_le32(current_region->type);
	hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
	hdr->length = cpu_to_le32(ar->msa.mem_size);

	/* Copy at most the region length; warn if it is smaller than
	 * the MSA size advertised in the header above.
	 */
	if (current_region->len < ar->msa.mem_size) {
		memcpy(buf, ar->msa.vaddr, current_region->len);
		ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
			    current_region->len, ar->msa.mem_size);
	} else {
		memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
	}
}
1467
/* Firmware crash entry point: bump the crash counter, allocate a new
 * coredump (may fail, logged as guid "n/a"), log driver state, and
 * capture the MSA region. Serialized by ar->dump_mutex.
 */
void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];

	mutex_lock(&ar->dump_mutex);

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_crash_counter++;
	spin_unlock_bh(&ar->data_lock);

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_msa_dump_memory(ar, crash_data);
	mutex_unlock(&ar->dump_mutex);
}
1491
/* SSR (subsystem restart) notifier for the modem ("mpss") remoteproc.
 * Tracks whether the modem was stopped deliberately (as opposed to
 * crashing) via ATH10K_SNOC_FLAG_MODEM_STOPPED; other events are only
 * logged. Always returns NOTIFY_OK.
 */
static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action,
				    void *data)
{
	struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb);
	struct ath10k *ar = ar_snoc->ar;
	struct qcom_ssr_notify_data *notify_data = data;

	switch (action) {
	case QCOM_SSR_BEFORE_POWERUP:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n");
		clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
		break;

	case QCOM_SSR_AFTER_POWERUP:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n");
		break;

	case QCOM_SSR_BEFORE_SHUTDOWN:
		/* Only a clean stop sets MODEM_STOPPED; a crash clears it
		 * so recovery logic can tell the two apart.
		 */
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n",
			   notify_data->crashed ? "crashed" : "stopping");
		if (!notify_data->crashed)
			set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
		else
			clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
		break;

	case QCOM_SSR_AFTER_SHUTDOWN:
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n");
		break;

	default:
		ath10k_err(ar, "received unrecognized event %lu\n", action);
		break;
	}

	return NOTIFY_OK;
}
1529
static int ath10k_modem_init(struct ath10k *ar)
1530
{
1531
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1532
void *notifier;
1533
int ret;
1534
1535
ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify;
1536
1537
notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb);
1538
if (IS_ERR(notifier)) {
1539
ret = PTR_ERR(notifier);
1540
ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret);
1541
return ret;
1542
}
1543
1544
ar_snoc->notifier = notifier;
1545
1546
return 0;
1547
}
1548
1549
static void ath10k_modem_deinit(struct ath10k *ar)
1550
{
1551
int ret;
1552
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1553
1554
ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb);
1555
if (ret)
1556
ath10k_err(ar, "error %d unregistering notifier\n", ret);
1557
}
1558
1559
/* Set up the MSA (modem shared assist) firmware memory: prefer a DT
 * reserved-memory region, falling back to a coherent DMA allocation
 * of @msa_size bytes. Fills ar->msa.{paddr,vaddr,mem_size}.
 * Returns 0 or a negative errno. Both paths are devm-managed.
 */
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
	struct device *dev = ar->dev;
	struct resource r;
	int ret;

	ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
	if (!ret) {
		/* DT-provided region: its size overrides @msa_size. */
		ar->msa.paddr = r.start;
		ar->msa.mem_size = resource_size(&r);
		ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
					      ar->msa.mem_size,
					      MEMREMAP_WT);
		if (IS_ERR(ar->msa.vaddr)) {
			dev_err(dev, "failed to map memory region: %pa\n",
				&r.start);
			return PTR_ERR(ar->msa.vaddr);
		}
	} else {
		ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
						    &ar->msa.paddr,
						    GFP_KERNEL);
		if (!ar->msa.vaddr) {
			ath10k_err(ar, "failed to allocate dma memory for msa region\n");
			return -ENOMEM;
		}
		ar->msa.mem_size = msa_size;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
		   &ar->msa.paddr,
		   ar->msa.vaddr);

	return 0;
}
1595
/* Initialize the optional "wifi-firmware" sub-device. Without the DT
 * node, firmware is managed by TrustZone (use_tz) and nothing is done.
 * Otherwise: register a child platform device, configure DMA, allocate
 * and attach an IOMMU paging domain, and map the MSA region into it so
 * the firmware can access it. Returns 0 or a negative errno, with full
 * unwinding of partially acquired resources.
 */
static int ath10k_fw_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct device *host_dev = &ar_snoc->dev->dev;
	struct platform_device_info info;
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		/* No self-managed firmware node => TrustZone handles it. */
		ar_snoc->use_tz = true;
		return 0;
	}

	memset(&info, 0, sizeof(info));
	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	pdev->dev.of_node = node;

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath10k_err(ar, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ar_snoc->fw.dev = &pdev->dev;

	iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev);
	if (IS_ERR(iommu_dom)) {
		ath10k_err(ar, "failed to allocate iommu domain\n");
		ret = PTR_ERR(iommu_dom);
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
	if (ret) {
		ath10k_err(ar, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	ar_snoc->fw.iommu_domain = iommu_dom;
	/* 1:1 mapping: IOVA equals the MSA physical address. */
	ar_snoc->fw.fw_start_addr = ar->msa.paddr;

	ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
			ar->msa.paddr, ar->msa.mem_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath10k_err(ar, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	of_node_put(node);

	return 0;

err_iommu_detach:
	iommu_detach_device(iommu_dom, ar_snoc->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}
1674
/* Undo ath10k_fw_init(): unmap the firmware region, detach and free
 * the IOMMU domain, and unregister the child platform device. A no-op
 * (returning 0) when firmware is TrustZone-managed. Always returns 0;
 * a short unmap is only logged.
 */
static int ath10k_fw_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
	struct iommu_domain *iommu;
	size_t unmapped_size;

	if (ar_snoc->use_tz)
		return 0;

	iommu = ar_snoc->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
				    mapped_size);
	if (unmapped_size != mapped_size)
		ath10k_err(ar, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ar_snoc->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ar_snoc->fw.dev));

	return 0;
}
1700
/* Device tree match table; per-SoC data selects hw rev, DMA mask and
 * MSA size (see drv_priv at the top of the file).
 */
static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	  .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1708
/* Platform probe: create the ath10k core, initialize platform
 * resources, CE pipes, IRQs, regulators, clocks, MSA memory, the
 * firmware sub-device and the QMI/modem notifiers. Each goto label
 * unwinds everything acquired before its failure point; devm-managed
 * allocations (vregs, clks) need no explicit unwinding.
 * Returns 0 or a negative errno.
 */
static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	u32 msa_size;
	int ret;
	u32 i;

	dev = &pdev->dev;
	drv_data = device_get_match_data(dev);
	if (!drv_data) {
		dev_err(dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d\n", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	/* Cross-link core, bus-private state and platform device. */
	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;
	msa_size = drv_data->msa_size;

	ath10k_snoc_quirks_init(ar);

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}
	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	/* Bulk-acquire the fixed set of regulators (devm-managed). */
	ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
	ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
				      sizeof(*ar_snoc->vregs), GFP_KERNEL);
	if (!ar_snoc->vregs) {
		ret = -ENOMEM;
		goto err_free_irq;
	}
	for (i = 0; i < ar_snoc->num_vregs; i++)
		ar_snoc->vregs[i].supply = ath10k_regulators[i];

	ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
				      ar_snoc->vregs);
	if (ret < 0)
		goto err_free_irq;

	/* Clocks are optional: missing entries yield NULL clk handles. */
	ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
	ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
				     sizeof(*ar_snoc->clks), GFP_KERNEL);
	if (!ar_snoc->clks) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	for (i = 0; i < ar_snoc->num_clks; i++)
		ar_snoc->clks[i].id = ath10k_clocks[i];

	ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
					 ar_snoc->clks);
	if (ret)
		goto err_free_irq;

	ret = ath10k_setup_msa_resources(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_fw_init(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
		goto err_fw_deinit;
	}

	ret = ath10k_modem_init(ar);
	if (ret)
		goto err_qmi_deinit;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");

	return 0;

err_qmi_deinit:
	ath10k_qmi_deinit(ar);

err_fw_deinit:
	ath10k_fw_deinit(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
1840
/* Full teardown shared by remove and shutdown. Sets UNREGISTERING
 * first so late fw indications are ignored, then unwinds in reverse
 * probe order and destroys the core. Always returns 0.
 */
static int ath10k_snoc_free_resources(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n");

	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_fw_deinit(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_modem_deinit(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
1859
/* Platform remove: if a recovery is in flight, give it up to 3s to
 * finish before tearing everything down.
 */
static void ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	ath10k_snoc_free_resources(ar);
}
1874
/* Platform shutdown: same teardown as remove, but without waiting for
 * recovery since the system is going down.
 */
static void ath10k_snoc_shutdown(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
	ath10k_snoc_free_resources(ar);
}
1882
/* Platform driver registration and module metadata. */
static struct platform_driver ath10k_snoc_driver = {
	.probe = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.shutdown = ath10k_snoc_shutdown,
	.driver = {
		.name = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");