GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath11k/pcic.c
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/export.h>
#include "core.h"
#include "pcic.h"
#include "debug.h"

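/* Interrupt names, indexed like ab->irq_num[]; the CE entries are passed to
 * request_irq() in ath11k_pcic_config_irq() below.
 */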
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

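/* Per-chip MSI vector layouts: how the available vectors are split between the
 * MHI, CE, WAKE and DP users for each supported hardware revision.
 */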
static const struct ath11k_msi_config ath11k_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6390_HW20,
	},
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH11K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW21,
	},
	{
		.total_vectors = 28,
		.total_users = 2,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
		},
		.hw_rev = ATH11K_HW_WCN6750_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA2066_HW21,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6698AQ_HW21,
	},
};

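/* Select the MSI vector layout matching ab->hw_rev and cache it in
 * ab->pci.msi.config; returns -EINVAL for unsupported hardware revisions.
 */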
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
{
	const struct ath11k_msi_config *msi_config;
	int i;

	for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
		msi_config = &ath11k_msi_config[i];

		if (msi_config->hw_rev == ab->hw_rev)
			break;
	}

	if (i == ARRAY_SIZE(ath11k_msi_config)) {
		ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
			   ab->hw_rev);
		return -EINVAL;
	}

	ab->pci.msi.config = msi_config;
	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);

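/* Low-level register write: offsets below ATH11K_PCI_WINDOW_START go directly
 * through the BAR mapping, higher offsets go through the bus-specific
 * window_write32() callback. The read helper below follows the same split.
 */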
static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	if (offset < ATH11K_PCI_WINDOW_START)
#if defined(__linux__)
		iowrite32(value, ab->mem + offset);
#elif defined(__FreeBSD__)
		iowrite32(value, (char *)ab->mem + offset);
#endif
	else
		ab->pci.ops->window_write32(ab, offset, value);
}

void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	int ret = 0;
	bool wakeup_required;

	/* For offsets beyond BAR + 4K - 32, we may need to wake up
	 * the device before accessing it.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup)
		ret = ab->pci.ops->wakeup(ab);

	__ath11k_pcic_write32(ab, offset, value);

	if (wakeup_required && !ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);

static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	u32 val;

	if (offset < ATH11K_PCI_WINDOW_START)
#if defined(__linux__)
		val = ioread32(ab->mem + offset);
#elif defined(__FreeBSD__)
		val = ioread32((char *)ab->mem + offset);
#endif
	else
		val = ab->pci.ops->window_read32(ab, offset);

	return val;
}

u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	int ret = 0;
	u32 val;
	bool wakeup_required;

	/* For offsets beyond BAR + 4K - 32, we may need to wake up
	 * the device before accessing it.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup)
		ret = ab->pci.ops->wakeup(ab);

	val = __ath11k_pcic_read32(ab, offset);

	if (wakeup_required && !ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);

	return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);

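/* Bulk read of the inclusive register range [start, end] into buf, 32 bits at
 * a time; the device is woken up first when the range extends into the region
 * that is not always accessible.
 */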
int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
{
	int ret = 0;
	bool wakeup_required;
	u32 *data = buf;
	u32 i;

	/* For offsets beyond BAR + 4K - 32, we may need to wake up
	 * the device before accessing it.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup) {
		ret = ab->pci.ops->wakeup(ab);
		if (ret) {
			ath11k_warn(ab,
				    "wakeup failed, data may be invalid: %d",
				    ret);
			/* Even though wakeup() failed, continue processing
			 * rather than returning, because some parts of the
			 * data may still be valid and useful, e.g. they could
			 * give us some clues about a firmware crash.
			 * Being misled by invalid data can be avoided because
			 * we are aware of the wakeup failure.
			 */
		}
	}

	for (i = start; i < end + 1; i += 4)
		*data++ = __ath11k_pcic_read32(ab, i);

	if (wakeup_required && ab->pci.ops->release)
		ab->pci.ops->release(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_read);

void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				 u32 *msi_addr_hi)
{
	*msi_addr_lo = ab->pci.msi.addr_lo;
	*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);

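/* Look up the vector count and base vector for a named MSI user ("MHI", "CE",
 * "WAKE" or "DP") in the active config; user_base_data is the base vector
 * offset by the endpoint's MSI base data.
 */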
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					int *num_vectors, u32 *user_base_data,
					u32 *base_vector)
{
	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
	int idx;

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*base_vector = msi_config->users[idx].base_vector;
			*user_base_data = *base_vector + ab->pci.msi.ep_base_data;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);

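/* Translate a CE id into its MSI data index, counting only copy engines whose
 * interrupts are enabled (CE_ATTR_DIS_INTR pipes do not consume a vector).
 */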
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
	u32 i, msi_data_idx;

	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		if (ce_id == i)
			break;

		msi_data_idx++;
	}
	*msi_idx = msi_data_idx;
}
EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);

static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
		free_netdev(irq_grp->napi_ndev);
	}
}

void ath11k_pcic_free_irq(struct ath11k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_pcic_free_ext_irq(ab);
}
EXPORT_SYMBOL(ath11k_pcic_free_irq);

static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pcic_ce_irq_disable(ab, i);
	}
}

static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

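/* Bottom half for a CE interrupt: service the copy engine, then re-enable the
 * IRQ line that the hard-irq handler below disabled.
 */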
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}

static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;
	struct ath11k_base *ab = ce_pipe->ab;
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_pcic_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_pcic_ext_grp_enable(irq_grp);
	}

	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);

static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);

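/* NAPI poll for a DP interrupt group: service the group's SRNG rings and, when
 * less than the budget is consumed, complete NAPI and re-enable the group's
 * IRQ lines.
 */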
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath11k_ext_irq_grp,
							  napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.ops->get_msi_irq(ab, vector);
}

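/* Set up the DP ("external") interrupt groups: allocate a dummy netdev and
 * NAPI context per group, map each group onto one of the DP MSI vectors and
 * request its IRQ; the lines are left disabled until
 * ath11k_pcic_ext_irq_enable().
 */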
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, n, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;
	struct ath11k_ext_irq_grp *irq_grp;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
						  &user_base_data,
						  &base_vector);
	if (ret < 0)
		return ret;

	irq_flags = IRQF_SHARED;
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev) {
			ret = -ENOMEM;
			goto fail_allocate;
		}

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pcic_ext_grp_napi_poll);

		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pcic_get_msi_irq(ab, vector);

			if (irq < 0) {
				ret = irq;
				goto fail_irq;
			}

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq %d group %d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
					  irq_flags, "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				for (n = 0; n <= i; n++) {
					irq_grp = &ab->ext_irq_grp[n];
					free_netdev(irq_grp->napi_ndev);
				}
				return ret;
			}
		}
		ath11k_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
fail_irq:
	/* The i-th napi_ndev was properly allocated; free it as well. */
	i += 1;
fail_allocate:
	for (n = 0; n < i; n++) {
		irq_grp = &ab->ext_irq_grp[n];
		free_netdev(irq_grp->napi_ndev);
	}
	return ret;
}

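/* Request the per-CE interrupts (skipping copy engines with CE_ATTR_DIS_INTR)
 * and then set up the DP interrupt groups; each CE line is disabled again
 * right after it is requested.
 */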
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
						  &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	irq_flags = IRQF_SHARED;
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath11k_pcic_get_msi_irq(ab, msi_data);
		if (irq < 0)
			return irq;

		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);

		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
				  irq_flags, irq_name[irq_idx], ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath11k_pcic_ce_irq_disable(ab, i);
	}

	ret = ath11k_pcic_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);

void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pcic_ce_irq_enable(ab, i);
	}
}
EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);

static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);

void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);

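/* Setting ATH11K_FLAG_DEVICE_INIT_DONE here arms the wakeup/release handling
 * in the register accessors above for offsets at or beyond
 * ATH11K_PCI_ACCESS_ALWAYS_OFF; then enable CE interrupts and replenish the
 * RX buffers.
 */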
int ath11k_pcic_start(struct ath11k_base *ab)
{
	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	ath11k_pcic_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);

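/* Resolve the uplink and downlink CE pipe numbers for a service id from the
 * hw_params service-to-CE map; warns and returns -ENOENT if either direction
 * is left unset.
 */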
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
				    u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);

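/* Register the bus-specific PCI ops; get_msi_irq, window_write32 and
 * window_read32 are mandatory. A NULL ops pointer is accepted and simply
 * leaves ab->pci.ops unset.
 */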
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
				 const struct ath11k_pci_ops *pci_ops)
{
	if (!pci_ops)
		return 0;

	/* Return error if mandatory pci_ops callbacks are missing */
	if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
	    !pci_ops->window_read32)
		return -EINVAL;

	ab->pci.ops = pci_ops;
	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);

void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
		    i == ATH11K_PCI_CE_WAKE_IRQ)
			continue;
		ath11k_pcic_ce_irq_enable(ab, i);
	}
}
EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);

void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
{
	int i;
	int irq_idx;
	struct ath11k_ce_pipe *ce_pipe;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];
		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
		    i == ATH11K_PCI_CE_WAKE_IRQ)
			continue;

		disable_irq_nosync(ab->irq_num[irq_idx]);
		synchronize_irq(ab->irq_num[irq_idx]);
		tasklet_kill(&ce_pipe->intr_tq);
	}
}
EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);