GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath10k/sdio.c
1
// SPDX-License-Identifier: ISC
2
/*
3
* Copyright (c) 2004-2011 Atheros Communications Inc.
4
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
5
* Copyright (c) 2016-2017 Erik Stromdahl <[email protected]>
6
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
7
*/
8
9
#include <linux/module.h>
10
#include <linux/mmc/card.h>
11
#include <linux/mmc/mmc.h>
12
#include <linux/mmc/host.h>
13
#include <linux/mmc/sdio_func.h>
14
#include <linux/mmc/sdio_ids.h>
15
#include <linux/mmc/sdio.h>
16
#include <linux/mmc/sd.h>
17
#include <linux/bitfield.h>
18
#include "core.h"
19
#include "bmi.h"
20
#include "debug.h"
21
#include "hif.h"
22
#include "htc.h"
23
#include "mac.h"
24
#include "targaddrs.h"
25
#include "trace.h"
26
#include "sdio.h"
27
#include "coredump.h"
28
29
void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
30
31
#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
32
33
/* inlined helper functions */
34
35
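/* Round len up to a whole number of mbox blocks (block_mask is block_size - 1). */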
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
36
size_t len)
37
{
38
return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
39
}
40
41
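/* SDIO uses a 1:1 mapping between pipe id and HTC endpoint id. */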
static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
42
{
43
return (enum ath10k_htc_ep_id)pipe_id;
44
}
45
46
static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
47
{
48
dev_kfree_skb(pkt->skb);
49
pkt->skb = NULL;
50
pkt->alloc_len = 0;
51
pkt->act_len = 0;
52
pkt->trailer_only = false;
53
}
54
55
static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
56
size_t act_len, size_t full_len,
57
bool part_of_bundle,
58
bool last_in_bundle)
59
{
60
pkt->skb = dev_alloc_skb(full_len);
61
if (!pkt->skb)
62
return -ENOMEM;
63
64
pkt->act_len = act_len;
65
pkt->alloc_len = full_len;
66
pkt->part_of_bundle = part_of_bundle;
67
pkt->last_in_bundle = last_in_bundle;
68
pkt->trailer_only = false;
69
70
return 0;
71
}
72
73
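/* An HTC message is trailer-only when its payload length equals the trailer length. */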
static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
74
{
75
bool trailer_only = false;
76
struct ath10k_htc_hdr *htc_hdr =
77
(struct ath10k_htc_hdr *)pkt->skb->data;
78
u16 len = __le16_to_cpu(htc_hdr->len);
79
80
if (len == htc_hdr->trailer_len)
81
trailer_only = true;
82
83
return trailer_only;
84
}
85
86
/* sdio/mmc functions */
87
88
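/* Build the 32-bit argument for an SD_IO_RW_DIRECT (CMD52) transfer. */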
static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
89
unsigned int address,
90
unsigned char val)
91
{
92
*arg = FIELD_PREP(BIT(31), write) |
93
FIELD_PREP(BIT(27), raw) |
94
FIELD_PREP(BIT(26), 1) |
95
FIELD_PREP(GENMASK(25, 9), address) |
96
FIELD_PREP(BIT(8), 1) |
97
FIELD_PREP(GENMASK(7, 0), val);
98
}
99
100
static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
101
unsigned int address,
102
unsigned char byte)
103
{
104
struct mmc_command io_cmd;
105
106
memset(&io_cmd, 0, sizeof(io_cmd));
107
ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
108
io_cmd.opcode = SD_IO_RW_DIRECT;
109
io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
110
111
return mmc_wait_for_cmd(card->host, &io_cmd, 0);
112
}
113
114
static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
115
unsigned int address,
116
unsigned char *byte)
117
{
118
struct mmc_command io_cmd;
119
int ret;
120
121
memset(&io_cmd, 0, sizeof(io_cmd));
122
ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
123
io_cmd.opcode = SD_IO_RW_DIRECT;
124
io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
125
126
ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
127
if (!ret)
128
*byte = io_cmd.resp[0];
129
130
return ret;
131
}
132
133
static int ath10k_sdio_config(struct ath10k *ar)
134
{
135
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
136
struct sdio_func *func = ar_sdio->func;
137
unsigned char byte, asyncintdelay = 2;
138
int ret;
139
140
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
141
142
sdio_claim_host(func);
143
144
byte = 0;
145
ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
146
SDIO_CCCR_DRIVE_STRENGTH,
147
&byte);
148
149
byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
150
byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
151
ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
152
153
ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
154
SDIO_CCCR_DRIVE_STRENGTH,
155
byte);
156
157
byte = 0;
158
ret = ath10k_sdio_func0_cmd52_rd_byte(
159
func->card,
160
CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
161
&byte);
162
163
byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
164
CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
165
CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
166
167
ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
168
CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
169
byte);
170
if (ret) {
171
ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
172
goto out;
173
}
174
175
byte = 0;
176
ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
177
CCCR_SDIO_IRQ_MODE_REG_SDIO3,
178
&byte);
179
180
byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
181
182
ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
183
CCCR_SDIO_IRQ_MODE_REG_SDIO3,
184
byte);
185
if (ret) {
186
ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
187
ret);
188
goto out;
189
}
190
191
byte = 0;
192
ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
193
CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
194
&byte);
195
196
byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
197
byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
198
199
ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
200
CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
201
byte);
202
203
/* give us some time to enable, in ms */
204
func->enable_timeout = 100;
205
206
ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
207
if (ret) {
208
ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
209
ar_sdio->mbox_info.block_size, ret);
210
goto out;
211
}
212
213
out:
214
sdio_release_host(func);
215
return ret;
216
}
217
218
static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
219
{
220
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
221
struct sdio_func *func = ar_sdio->func;
222
int ret;
223
224
sdio_claim_host(func);
225
226
sdio_writel(func, val, addr, &ret);
227
if (ret) {
228
ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
229
val, addr, ret);
230
goto out;
231
}
232
233
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
234
addr, val);
235
236
out:
237
sdio_release_host(func);
238
239
return ret;
240
}
241
242
static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
243
{
244
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
245
struct sdio_func *func = ar_sdio->func;
246
__le32 *buf;
247
int ret;
248
249
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
250
if (!buf)
251
return -ENOMEM;
252
253
*buf = cpu_to_le32(val);
254
255
sdio_claim_host(func);
256
257
ret = sdio_writesb(func, addr, buf, sizeof(*buf));
258
if (ret) {
259
ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
260
val, addr, ret);
261
goto out;
262
}
263
264
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
265
addr, val);
266
267
out:
268
sdio_release_host(func);
269
270
kfree(buf);
271
272
return ret;
273
}
274
275
static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
276
{
277
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
278
struct sdio_func *func = ar_sdio->func;
279
int ret;
280
281
sdio_claim_host(func);
282
*val = sdio_readl(func, addr, &ret);
283
if (ret) {
284
ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
285
addr, ret);
286
goto out;
287
}
288
289
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
290
addr, *val);
291
292
out:
293
sdio_release_host(func);
294
295
return ret;
296
}
297
298
static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
299
{
300
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
301
struct sdio_func *func = ar_sdio->func;
302
int ret;
303
304
sdio_claim_host(func);
305
306
ret = sdio_memcpy_fromio(func, buf, addr, len);
307
if (ret) {
308
ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
309
addr, ret);
310
goto out;
311
}
312
313
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
314
addr, buf, len);
315
ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
316
317
out:
318
sdio_release_host(func);
319
320
return ret;
321
}
322
323
static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
324
{
325
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
326
struct sdio_func *func = ar_sdio->func;
327
int ret;
328
329
sdio_claim_host(func);
330
331
/* For some reason toio() doesn't have const for the buffer, need
332
* an ugly hack to work around that.
333
*/
334
ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
335
if (ret) {
336
ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
337
addr, ret);
338
goto out;
339
}
340
341
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
342
addr, buf, len);
343
ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
344
345
out:
346
sdio_release_host(func);
347
348
return ret;
349
}
350
351
static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
352
{
353
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
354
struct sdio_func *func = ar_sdio->func;
355
int ret;
356
357
sdio_claim_host(func);
358
359
len = round_down(len, ar_sdio->mbox_info.block_size);
360
361
ret = sdio_readsb(func, buf, addr, len);
362
if (ret) {
363
ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
364
addr, ret);
365
goto out;
366
}
367
368
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
369
addr, buf, len);
370
ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
371
372
out:
373
sdio_release_host(func);
374
375
return ret;
376
}
377
378
/* HIF mbox functions */
379
380
static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
381
struct ath10k_sdio_rx_data *pkt,
382
u32 *lookaheads,
383
int *n_lookaheads)
384
{
385
struct ath10k_htc *htc = &ar->htc;
386
struct sk_buff *skb = pkt->skb;
387
struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
388
bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
389
enum ath10k_htc_ep_id eid;
390
u8 *trailer;
391
int ret;
392
393
if (trailer_present) {
394
trailer = skb->data + skb->len - htc_hdr->trailer_len;
395
396
eid = pipe_id_to_eid(htc_hdr->eid);
397
398
ret = ath10k_htc_process_trailer(htc,
399
trailer,
400
htc_hdr->trailer_len,
401
eid,
402
lookaheads,
403
n_lookaheads);
404
if (ret)
405
return ret;
406
407
if (is_trailer_only_msg(pkt))
408
pkt->trailer_only = true;
409
410
skb_trim(skb, skb->len - htc_hdr->trailer_len);
411
}
412
413
skb_pull(skb, sizeof(*htc_hdr));
414
415
return 0;
416
}
417
418
static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
419
u32 lookaheads[],
420
int *n_lookahead)
421
{
422
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
423
struct ath10k_htc *htc = &ar->htc;
424
struct ath10k_sdio_rx_data *pkt;
425
struct ath10k_htc_ep *ep;
426
struct ath10k_skb_rxcb *cb;
427
enum ath10k_htc_ep_id id;
428
int ret, i, *n_lookahead_local;
429
u32 *lookaheads_local;
430
int lookahead_idx = 0;
431
432
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
433
lookaheads_local = lookaheads;
434
n_lookahead_local = n_lookahead;
435
436
id = ((struct ath10k_htc_hdr *)
437
&lookaheads[lookahead_idx++])->eid;
438
439
if (id >= ATH10K_HTC_EP_COUNT) {
440
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
441
id);
442
ret = -ENOMEM;
443
goto out;
444
}
445
446
ep = &htc->endpoint[id];
447
448
if (ep->service_id == 0) {
449
ath10k_warn(ar, "ep %d is not connected\n", id);
450
ret = -ENOMEM;
451
goto out;
452
}
453
454
pkt = &ar_sdio->rx_pkts[i];
455
456
if (pkt->part_of_bundle && !pkt->last_in_bundle) {
457
/* Only read lookaheads from RX trailers
458
* for the last packet in a bundle.
459
*/
460
lookahead_idx--;
461
lookaheads_local = NULL;
462
n_lookahead_local = NULL;
463
}
464
465
ret = ath10k_sdio_mbox_rx_process_packet(ar,
466
pkt,
467
lookaheads_local,
468
n_lookahead_local);
469
if (ret)
470
goto out;
471
472
if (!pkt->trailer_only) {
473
cb = ATH10K_SKB_RXCB(pkt->skb);
474
cb->eid = id;
475
476
skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
477
queue_work(ar->workqueue_aux,
478
&ar_sdio->async_work_rx);
479
} else {
480
kfree_skb(pkt->skb);
481
}
482
483
/* The RX complete handler now owns the skb... */
484
pkt->skb = NULL;
485
pkt->alloc_len = 0;
486
}
487
488
ret = 0;
489
490
out:
491
/* Free all packets that were not passed on to the RX completion
492
* handler...
493
*/
494
for (; i < ar_sdio->n_rx_pkts; i++)
495
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
496
497
return ret;
498
}
499
500
static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
501
struct ath10k_sdio_rx_data *rx_pkts,
502
struct ath10k_htc_hdr *htc_hdr,
503
size_t full_len, size_t act_len,
504
size_t *bndl_cnt)
505
{
506
int ret, i;
507
u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
508
509
*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
510
511
if (*bndl_cnt > max_msgs) {
512
ath10k_warn(ar,
513
"HTC bundle length %u exceeds maximum %u\n",
514
le16_to_cpu(htc_hdr->len),
515
max_msgs);
516
return -ENOMEM;
517
}
518
519
/* Allocate bndl_cnt extra skb's for the bundle.
520
* The packet containing the
521
* ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
522
* in bndl_cnt. The skb for that packet will be
523
* allocated separately.
524
*/
525
for (i = 0; i < *bndl_cnt; i++) {
526
ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
527
act_len,
528
full_len,
529
true,
530
false);
531
if (ret)
532
return ret;
533
}
534
535
return 0;
536
}
537
538
static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
539
u32 lookaheads[], int n_lookaheads)
540
{
541
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
542
struct ath10k_htc_hdr *htc_hdr;
543
size_t full_len, act_len;
544
bool last_in_bundle;
545
int ret, i;
546
int pkt_cnt = 0;
547
548
if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
549
ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
550
n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
551
ret = -ENOMEM;
552
goto err;
553
}
554
555
for (i = 0; i < n_lookaheads; i++) {
556
htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
557
last_in_bundle = false;
558
559
if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
560
ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
561
le16_to_cpu(htc_hdr->len),
562
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
563
ret = -ENOMEM;
564
565
ath10k_core_start_recovery(ar);
566
ath10k_warn(ar, "exceeds length, start recovery\n");
567
568
goto err;
569
}
570
571
act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
572
full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
573
574
if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
575
ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
576
htc_hdr->eid, htc_hdr->flags,
577
le16_to_cpu(htc_hdr->len));
578
ret = -EINVAL;
579
goto err;
580
}
581
582
if (ath10k_htc_get_bundle_count(
583
ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
584
/* HTC header indicates that every packet to follow
585
* has the same padded length so that it can be
586
* optimally fetched as a full bundle.
587
*/
588
size_t bndl_cnt;
589
590
ret = ath10k_sdio_mbox_alloc_bundle(ar,
591
&ar_sdio->rx_pkts[pkt_cnt],
592
htc_hdr,
593
full_len,
594
act_len,
595
&bndl_cnt);
596
597
if (ret) {
598
ath10k_warn(ar, "failed to allocate a bundle: %d\n",
599
ret);
600
goto err;
601
}
602
603
pkt_cnt += bndl_cnt;
604
605
/* next buffer will be the last in the bundle */
606
last_in_bundle = true;
607
}
608
609
/* Allocate skb for packet. If the packet had the
610
* ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
611
* packet skb's have been allocated in the previous step.
612
*/
613
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
614
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
615
616
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
617
act_len,
618
full_len,
619
last_in_bundle,
620
last_in_bundle);
621
if (ret) {
622
ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
623
goto err;
624
}
625
626
pkt_cnt++;
627
}
628
629
ar_sdio->n_rx_pkts = pkt_cnt;
630
631
return 0;
632
633
err:
634
for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
635
if (!ar_sdio->rx_pkts[i].alloc_len)
636
break;
637
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
638
}
639
640
return ret;
641
}
642
643
static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
644
{
645
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
646
struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
647
struct sk_buff *skb = pkt->skb;
648
struct ath10k_htc_hdr *htc_hdr;
649
int ret;
650
651
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
652
skb->data, pkt->alloc_len);
653
if (ret)
654
goto err;
655
656
htc_hdr = (struct ath10k_htc_hdr *)skb->data;
657
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
658
659
if (pkt->act_len > pkt->alloc_len) {
660
ret = -EINVAL;
661
goto err;
662
}
663
664
skb_put(skb, pkt->act_len);
665
return 0;
666
667
err:
668
ar_sdio->n_rx_pkts = 0;
669
ath10k_sdio_mbox_free_rx_pkt(pkt);
670
671
return ret;
672
}
673
674
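/* Fetch all bundled RX packets with a single sdio_readsb() into the vsg buffer,
 * then copy each packet out into its own skb.
 */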
static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
675
{
676
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
677
struct ath10k_sdio_rx_data *pkt;
678
struct ath10k_htc_hdr *htc_hdr;
679
int ret, i;
680
u32 pkt_offset, virt_pkt_len;
681
682
virt_pkt_len = 0;
683
for (i = 0; i < ar_sdio->n_rx_pkts; i++)
684
virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
685
686
if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
687
ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
688
ret = -E2BIG;
689
goto err;
690
}
691
692
ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
693
ar_sdio->vsg_buffer, virt_pkt_len);
694
if (ret) {
695
ath10k_warn(ar, "failed to read bundle packets: %d", ret);
696
goto err;
697
}
698
699
pkt_offset = 0;
700
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
701
pkt = &ar_sdio->rx_pkts[i];
702
htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
703
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
704
705
if (pkt->act_len > pkt->alloc_len) {
706
ret = -EINVAL;
707
goto err;
708
}
709
710
skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
711
pkt_offset += pkt->alloc_len;
712
}
713
714
return 0;
715
716
err:
717
/* Free all packets that were not successfully fetched. */
718
for (i = 0; i < ar_sdio->n_rx_pkts; i++)
719
ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
720
721
ar_sdio->n_rx_pkts = 0;
722
723
return ret;
724
}
725
726
/* This is the timeout for mailbox processing done in the sdio irq
727
* handler. The timeout is deliberately set quite high since SDIO dump logs
728
* over serial port can/will add a substantial overhead to the processing
729
* (if enabled).
730
*/
731
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
732
733
static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
734
u32 msg_lookahead, bool *done)
735
{
736
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
737
u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
738
int n_lookaheads = 1;
739
unsigned long timeout;
740
int ret;
741
742
*done = true;
743
744
/* Copy the lookahead obtained from the HTC register table into our
745
* temp array as a start value.
746
*/
747
lookaheads[0] = msg_lookahead;
748
749
timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
750
do {
751
/* Try to allocate as many HTC RX packets indicated by
752
* n_lookaheads.
753
*/
754
ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
755
n_lookaheads);
756
if (ret)
757
break;
758
759
if (ar_sdio->n_rx_pkts >= 2)
760
/* A recv bundle was detected, force IRQ status
761
* re-check again.
762
*/
763
*done = false;
764
765
if (ar_sdio->n_rx_pkts > 1)
766
ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
767
else
768
ret = ath10k_sdio_mbox_rx_fetch(ar);
769
770
/* Process fetched packets. This will potentially update
771
* n_lookaheads depending on if the packets contain lookahead
772
* reports.
773
*/
774
n_lookaheads = 0;
775
ret = ath10k_sdio_mbox_rx_process_packets(ar,
776
lookaheads,
777
&n_lookaheads);
778
779
if (!n_lookaheads || ret)
780
break;
781
782
/* For SYNCH processing, if we get here, we are running
783
* through the loop again due to updated lookaheads. Set
784
* flag that we should re-check IRQ status registers again
785
* before leaving IRQ processing, this can net better
786
* performance in high throughput situations.
787
*/
788
*done = false;
789
} while (time_before(jiffies, timeout));
790
791
if (ret && (ret != -ECANCELED))
792
ath10k_warn(ar, "failed to get pending recv messages: %d\n",
793
ret);
794
795
return ret;
796
}
797
798
static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
799
{
800
u32 val;
801
int ret;
802
803
/* TODO: Add firmware crash handling */
804
ath10k_warn(ar, "firmware crashed\n");
805
806
/* read counter to clear the interrupt, the debug error interrupt is
807
* counter 0.
808
*/
809
ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
810
if (ret)
811
ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
812
813
return ret;
814
}
815
816
static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
817
{
818
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
819
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
820
u8 counter_int_status;
821
int ret;
822
823
mutex_lock(&irq_data->mtx);
824
counter_int_status = irq_data->irq_proc_reg->counter_int_status &
825
irq_data->irq_en_reg->cntr_int_status_en;
826
827
/* NOTE: other modules like GMBOX may use the counter interrupt for
828
* credit flow control on other counters; we only need to check for
829
* the debug assertion counter interrupt.
830
*/
831
if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
832
ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
833
else
834
ret = 0;
835
836
mutex_unlock(&irq_data->mtx);
837
838
return ret;
839
}
840
841
static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
842
{
843
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
844
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
845
u8 error_int_status;
846
int ret;
847
848
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
849
850
error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
851
if (!error_int_status) {
852
ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
853
error_int_status);
854
return -EIO;
855
}
856
857
ath10k_dbg(ar, ATH10K_DBG_SDIO,
858
"sdio error_int_status 0x%x\n", error_int_status);
859
860
if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
861
error_int_status))
862
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
863
864
if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
865
error_int_status))
866
ath10k_warn(ar, "rx underflow interrupt error\n");
867
868
if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
869
error_int_status))
870
ath10k_warn(ar, "tx overflow interrupt error\n");
871
872
/* Clear the interrupt */
873
irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
874
875
/* set W1C value to clear the interrupt, this hits the register first */
876
ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
877
error_int_status);
878
if (ret) {
879
ath10k_warn(ar, "unable to write to error int status address: %d\n",
880
ret);
881
return ret;
882
}
883
884
return 0;
885
}
886
887
static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
888
{
889
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
890
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
891
u8 cpu_int_status;
892
int ret;
893
894
mutex_lock(&irq_data->mtx);
895
cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
896
irq_data->irq_en_reg->cpu_int_status_en;
897
if (!cpu_int_status) {
898
ath10k_warn(ar, "CPU interrupt status is zero\n");
899
ret = -EIO;
900
goto out;
901
}
902
903
/* Clear the interrupt */
904
irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
905
906
/* Set up the register transfer buffer to hit the register 4 times,
907
* this is done to make the access 4-byte aligned to mitigate issues
908
* with host bus interconnects that restrict bus transfer lengths to
909
* be a multiple of 4-bytes.
910
*
911
* Set W1C value to clear the interrupt, this hits the register first.
912
*/
913
ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
914
cpu_int_status);
915
if (ret) {
916
ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
917
ret);
918
goto out;
919
}
920
921
out:
922
mutex_unlock(&irq_data->mtx);
923
if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
924
ath10k_sdio_fw_crashed_dump(ar);
925
926
return ret;
927
}
928
929
static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
930
u8 *host_int_status,
931
u32 *lookahead)
932
{
933
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
934
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
935
struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
936
struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
937
u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
938
int ret;
939
940
mutex_lock(&irq_data->mtx);
941
942
*lookahead = 0;
943
*host_int_status = 0;
944
945
/* int_status_en is supposed to be non-zero, otherwise interrupts
946
* shouldn't be enabled. There is however a short time frame during
947
* initialization between the irq register and int_status_en init
948
* where this can happen.
949
* We silently ignore this condition.
950
*/
951
if (!irq_en_reg->int_status_en) {
952
ret = 0;
953
goto out;
954
}
955
956
/* Read the first sizeof(struct ath10k_irq_proc_registers)
957
* bytes of the HTC register table. This
958
* will yield us the value of different int status
959
* registers and the lookahead registers.
960
*/
961
ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
962
irq_proc_reg, sizeof(*irq_proc_reg));
963
if (ret) {
964
ath10k_core_start_recovery(ar);
965
ath10k_warn(ar, "read int status fail, start recovery\n");
966
goto out;
967
}
968
969
/* Update only those registers that are enabled */
970
*host_int_status = irq_proc_reg->host_int_status &
971
irq_en_reg->int_status_en;
972
973
/* Look at mbox status */
974
if (!(*host_int_status & htc_mbox)) {
975
*lookahead = 0;
976
ret = 0;
977
goto out;
978
}
979
980
/* Mask out pending mbox value, we use look ahead as
981
* the real flag for mbox processing.
982
*/
983
*host_int_status &= ~htc_mbox;
984
if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
985
*lookahead = le32_to_cpu(
986
irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
987
if (!*lookahead)
988
ath10k_warn(ar, "sdio mbox lookahead is zero\n");
989
}
990
991
out:
992
mutex_unlock(&irq_data->mtx);
993
return ret;
994
}
995
996
static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
997
bool *done)
998
{
999
u8 host_int_status;
1000
u32 lookahead;
1001
int ret;
1002
1003
/* NOTE: HIF implementation guarantees that the context of this
1004
* call allows us to perform SYNCHRONOUS I/O, that is we can block,
1005
* sleep or call any API that can block or switch thread/task
1006
* contexts. This is a fully schedulable context.
1007
*/
1008
1009
ret = ath10k_sdio_mbox_read_int_status(ar,
1010
&host_int_status,
1011
&lookahead);
1012
if (ret) {
1013
*done = true;
1014
goto out;
1015
}
1016
1017
if (!host_int_status && !lookahead) {
1018
ret = 0;
1019
*done = true;
1020
goto out;
1021
}
1022
1023
if (lookahead) {
1024
ath10k_dbg(ar, ATH10K_DBG_SDIO,
1025
"sdio pending mailbox msg lookahead 0x%08x\n",
1026
lookahead);
1027
1028
ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
1029
lookahead,
1030
done);
1031
if (ret)
1032
goto out;
1033
}
1034
1035
/* now, handle the rest of the interrupts */
1036
ath10k_dbg(ar, ATH10K_DBG_SDIO,
1037
"sdio host_int_status 0x%x\n", host_int_status);
1038
1039
if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
1040
/* CPU Interrupt */
1041
ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
1042
if (ret)
1043
goto out;
1044
}
1045
1046
if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
1047
/* Error Interrupt */
1048
ret = ath10k_sdio_mbox_proc_err_intr(ar);
1049
if (ret)
1050
goto out;
1051
}
1052
1053
if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
1054
/* Counter Interrupt */
1055
ret = ath10k_sdio_mbox_proc_counter_intr(ar);
1056
1057
ret = 0;
1058
1059
out:
1060
/* An optimization to bypass reading the IRQ status registers
1061
* unnecessarily which can re-wake the target, if upper layers
1062
* determine that we are in a low-throughput mode, we can rely on
1063
* taking another interrupt rather than re-checking the status
1064
* registers which can re-wake the target.
1065
*
1066
* NOTE: host interfaces that make use of detecting pending
1067
* mbox messages at the hif level cannot use this optimization due to
1068
* possible side effects; SPI requires the host to drain all
1069
* messages from the mailbox before exiting the ISR routine.
1070
*/
1071
1072
ath10k_dbg(ar, ATH10K_DBG_SDIO,
1073
"sdio pending irqs done %d status %d",
1074
*done, ret);
1075
1076
return ret;
1077
}
1078
1079
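/* Set up mailbox addresses and sizes; the extended mbox 0 width depends on the
 * chip revision encoded in the SDIO device id.
 */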
static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
1080
{
1081
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1082
struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1083
u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
1084
1085
mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
1086
mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
1087
mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
1088
mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
1089
mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
1090
1091
mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
1092
1093
dev_id_base = (device & 0x0F00);
1094
dev_id_chiprev = (device & 0x00FF);
1095
switch (dev_id_base) {
1096
case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
1097
if (dev_id_chiprev < 4)
1098
mbox_info->ext_info[0].htc_ext_sz =
1099
ATH10K_HIF_MBOX0_EXT_WIDTH;
1100
else
1101
/* from QCA6174 2.0(0x504), the width has been extended
1102
* to 56K
1103
*/
1104
mbox_info->ext_info[0].htc_ext_sz =
1105
ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1106
break;
1107
case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
1108
mbox_info->ext_info[0].htc_ext_sz =
1109
ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1110
break;
1111
default:
1112
mbox_info->ext_info[0].htc_ext_sz =
1113
ATH10K_HIF_MBOX0_EXT_WIDTH;
1114
}
1115
1116
mbox_info->ext_info[1].htc_ext_addr =
1117
mbox_info->ext_info[0].htc_ext_addr +
1118
mbox_info->ext_info[0].htc_ext_sz +
1119
ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
1120
mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
1121
}
1122
1123
/* BMI functions */
1124
1125
static int ath10k_sdio_bmi_credits(struct ath10k *ar)
1126
{
1127
u32 addr, cmd_credits;
1128
unsigned long timeout;
1129
int ret;
1130
1131
/* Read the counter register to get the command credits */
1132
addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
1133
timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1134
cmd_credits = 0;
1135
1136
while (time_before(jiffies, timeout) && !cmd_credits) {
1137
/* Hit the credit counter with a 4-byte access, the first byte
1138
* read will hit the counter and cause a decrement, while the
1139
* remaining 3 bytes have no effect. The rationale behind this
1140
* is to make all HIF accesses 4-byte aligned.
1141
*/
1142
ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
1143
if (ret) {
1144
ath10k_warn(ar,
1145
"unable to decrement the command credit count register: %d\n",
1146
ret);
1147
return ret;
1148
}
1149
1150
/* The counter is only 8 bits.
1151
* Ignore anything in the upper 3 bytes
1152
*/
1153
cmd_credits &= 0xFF;
1154
}
1155
1156
if (!cmd_credits) {
1157
ath10k_warn(ar, "bmi communication timeout\n");
1158
return -ETIMEDOUT;
1159
}
1160
1161
return 0;
1162
}
1163
1164
static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
1165
{
1166
unsigned long timeout;
1167
u32 rx_word;
1168
int ret;
1169
1170
timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1171
rx_word = 0;
1172
1173
while ((time_before(jiffies, timeout)) && !rx_word) {
1174
ret = ath10k_sdio_read32(ar,
1175
MBOX_HOST_INT_STATUS_ADDRESS,
1176
&rx_word);
1177
if (ret) {
1178
ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
1179
return ret;
1180
}
1181
1182
/* all we really want is one bit */
1183
rx_word &= 1;
1184
}
1185
1186
if (!rx_word) {
1187
ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
1188
return -EINVAL;
1189
}
1190
1191
return ret;
1192
}
1193
1194
static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
1195
void *req, u32 req_len,
1196
void *resp, u32 *resp_len)
1197
{
1198
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1199
u32 addr;
1200
int ret;
1201
1202
if (req) {
1203
ret = ath10k_sdio_bmi_credits(ar);
1204
if (ret)
1205
return ret;
1206
1207
addr = ar_sdio->mbox_info.htc_addr;
1208
1209
memcpy(ar_sdio->bmi_buf, req, req_len);
1210
ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
1211
if (ret) {
1212
ath10k_warn(ar,
1213
"unable to send the bmi data to the device: %d\n",
1214
ret);
1215
return ret;
1216
}
1217
}
1218
1219
if (!resp || !resp_len)
1220
/* No response expected */
1221
return 0;
1222
1223
/* During normal bootup, small reads may be required.
1224
* Rather than issue an HIF Read and then wait as the Target
1225
* adds successive bytes to the FIFO, we wait here until
1226
* we know that response data is available.
1227
*
1228
* This allows us to cleanly timeout on an unexpected
1229
* Target failure rather than risk problems at the HIF level.
1230
* In particular, this avoids SDIO timeouts and possibly garbage
1231
* data on some host controllers. And on an interconnect
1232
* such as Compact Flash (as well as some SDIO masters) which
1233
* does not provide any indication on data timeout, it avoids
1234
* a potential hang or garbage response.
1235
*
1236
* Synchronization is more difficult for reads larger than the
1237
* size of the MBOX FIFO (128B), because the Target is unable
1238
* to push the 129th byte of data until AFTER the Host posts an
1239
* HIF Read and removes some FIFO data. So for large reads the
1240
* Host proceeds to post an HIF Read BEFORE all the data is
1241
* actually available to read. Fortunately, large BMI reads do
1242
* not occur in practice -- they're supported for debug/development.
1243
*
1244
* So Host/Target BMI synchronization is divided into these cases:
1245
* CASE 1: length < 4
1246
* Should not happen
1247
*
1248
* CASE 2: 4 <= length <= 128
1249
* Wait for first 4 bytes to be in FIFO
1250
* If CONSERVATIVE_BMI_READ is enabled, also wait for
1251
* a BMI command credit, which indicates that the ENTIRE
1252
* response is available in the FIFO
1253
*
1254
* CASE 3: length > 128
1255
* Wait for the first 4 bytes to be in FIFO
1256
*
1257
* For most uses, a small timeout should be sufficient and we will
1258
* usually see a response quickly; but there may be some unusual
1259
* (debug) cases of BMI_EXECUTE where we want a larger timeout.
1260
* For now, we use an unbounded busy loop while waiting for
1261
* BMI_EXECUTE.
1262
*
1263
* If BMI_EXECUTE ever needs to support longer-latency execution,
1264
* especially in production, this code needs to be enhanced to sleep
1265
* and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
1266
* a function of Host processor speed.
1267
*/
1268
ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
1269
if (ret)
1270
return ret;
1271
1272
/* We always read from the start of the mbox address */
1273
addr = ar_sdio->mbox_info.htc_addr;
1274
ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
1275
if (ret) {
1276
ath10k_warn(ar,
1277
"unable to read the bmi data from the device: %d\n",
1278
ret);
1279
return ret;
1280
}
1281
1282
memcpy(resp, ar_sdio->bmi_buf, *resp_len);
1283
1284
return 0;
1285
}
1286
1287
/* sdio async handling functions */
1288
1289
static struct ath10k_sdio_bus_request
1290
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
1291
{
1292
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1293
struct ath10k_sdio_bus_request *bus_req;
1294
1295
spin_lock_bh(&ar_sdio->lock);
1296
1297
if (list_empty(&ar_sdio->bus_req_freeq)) {
1298
bus_req = NULL;
1299
goto out;
1300
}
1301
1302
bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
1303
struct ath10k_sdio_bus_request, list);
1304
list_del(&bus_req->list);
1305
1306
out:
1307
spin_unlock_bh(&ar_sdio->lock);
1308
return bus_req;
1309
}
1310
1311
static void ath10k_sdio_free_bus_req(struct ath10k *ar,
1312
struct ath10k_sdio_bus_request *bus_req)
1313
{
1314
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1315
1316
memset(bus_req, 0, sizeof(*bus_req));
1317
1318
spin_lock_bh(&ar_sdio->lock);
1319
list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
1320
spin_unlock_bh(&ar_sdio->lock);
1321
}
1322
1323
static void __ath10k_sdio_write_async(struct ath10k *ar,
1324
struct ath10k_sdio_bus_request *req)
1325
{
1326
struct ath10k_htc_ep *ep;
1327
struct sk_buff *skb;
1328
int ret;
1329
1330
skb = req->skb;
1331
ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
1332
if (ret)
1333
ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
1334
req->address, ret);
1335
1336
if (req->htc_msg) {
1337
ep = &ar->htc.endpoint[req->eid];
1338
ath10k_htc_notify_tx_completion(ep, skb);
1339
} else if (req->comp) {
1340
complete(req->comp);
1341
}
1342
1343
ath10k_sdio_free_bus_req(ar, req);
1344
}
1345
1346
/* To improve throughput, use a workqueue to deliver packets to the HTC layer;
1347
* this way the SDIO bus is utilised much better.
1348
*/
1349
static void ath10k_rx_indication_async_work(struct work_struct *work)
1350
{
1351
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1352
async_work_rx);
1353
struct ath10k *ar = ar_sdio->ar;
1354
struct ath10k_htc_ep *ep;
1355
struct ath10k_skb_rxcb *cb;
1356
struct sk_buff *skb;
1357
1358
while (true) {
1359
skb = skb_dequeue(&ar_sdio->rx_head);
1360
if (!skb)
1361
break;
1362
cb = ATH10K_SKB_RXCB(skb);
1363
ep = &ar->htc.endpoint[cb->eid];
1364
ep->ep_ops.ep_rx_complete(ar, skb);
1365
}
1366
1367
if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
1368
local_bh_disable();
1369
napi_schedule(&ar->napi);
1370
local_bh_enable();
1371
}
1372
}
1373
1374
static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
1375
{
1376
struct ath10k *ar = ar_sdio->ar;
1377
unsigned char rtc_state = 0;
1378
int ret = 0;
1379
1380
rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
1381
if (ret) {
1382
ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
1383
return ret;
1384
}
1385
1386
*state = rtc_state & 0x3;
1387
1388
return ret;
1389
}
1390
1391
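/* Enable or disable target mbox sleep via the FIFO timeout/chip control register;
 * when disabling sleep, poll the RTC state until it reports on.
 */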
static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
1392
{
1393
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1394
u32 val;
1395
int retry = ATH10K_CIS_READ_RETRY, ret = 0;
1396
unsigned char rtc_state = 0;
1397
1398
sdio_claim_host(ar_sdio->func);
1399
1400
ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
1401
if (ret) {
1402
ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
1403
ret);
1404
goto release;
1405
}
1406
1407
if (enable_sleep) {
1408
val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
1409
ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
1410
} else {
1411
val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
1412
ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
1413
}
1414
1415
ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
1416
if (ret) {
1417
ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
1418
ret);
1419
}
1420
1421
if (!enable_sleep) {
1422
do {
1423
udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
1424
ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
1425
1426
if (ret) {
1427
ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
1428
break;
1429
}
1430
1431
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
1432
rtc_state);
1433
1434
if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
1435
break;
1436
1437
udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
1438
retry--;
1439
} while (retry > 0);
1440
}
1441
1442
release:
1443
sdio_release_host(ar_sdio->func);
1444
1445
return ret;
1446
}
1447
1448
static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
1449
{
1450
struct ath10k_sdio *ar_sdio = timer_container_of(ar_sdio, t,
1451
sleep_timer);
1452
1453
ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
1454
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1455
}
1456
1457
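/* Drain the async write queue; wake the mbox first if the target may be asleep,
 * and request sleep again once the queue has been flushed.
 */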
static void ath10k_sdio_write_async_work(struct work_struct *work)
1458
{
1459
struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1460
wr_async_work);
1461
struct ath10k *ar = ar_sdio->ar;
1462
struct ath10k_sdio_bus_request *req, *tmp_req;
1463
struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1464
1465
spin_lock_bh(&ar_sdio->wr_async_lock);
1466
1467
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1468
list_del(&req->list);
1469
spin_unlock_bh(&ar_sdio->wr_async_lock);
1470
1471
if (req->address >= mbox_info->htc_addr &&
1472
ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
1473
ath10k_sdio_set_mbox_sleep(ar, false);
1474
mod_timer(&ar_sdio->sleep_timer, jiffies +
1475
msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
1476
}
1477
1478
__ath10k_sdio_write_async(ar, req);
1479
spin_lock_bh(&ar_sdio->wr_async_lock);
1480
}
1481
1482
spin_unlock_bh(&ar_sdio->wr_async_lock);
1483
1484
if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
1485
ath10k_sdio_set_mbox_sleep(ar, true);
1486
}
1487
1488
static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
1489
struct sk_buff *skb,
1490
struct completion *comp,
1491
bool htc_msg, enum ath10k_htc_ep_id eid)
1492
{
1493
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1494
struct ath10k_sdio_bus_request *bus_req;
1495
1496
/* Allocate a bus request for the message and queue it on the
1497
* SDIO workqueue.
1498
*/
1499
bus_req = ath10k_sdio_alloc_busreq(ar);
1500
if (!bus_req) {
1501
ath10k_warn(ar,
1502
"unable to allocate bus request for async request\n");
1503
return -ENOMEM;
1504
}
1505
1506
bus_req->skb = skb;
1507
bus_req->eid = eid;
1508
bus_req->address = addr;
1509
bus_req->htc_msg = htc_msg;
1510
bus_req->comp = comp;
1511
1512
spin_lock_bh(&ar_sdio->wr_async_lock);
1513
list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
1514
spin_unlock_bh(&ar_sdio->wr_async_lock);
1515
1516
return 0;
1517
}
1518
1519
/* IRQ handler */
1520
1521
static void ath10k_sdio_irq_handler(struct sdio_func *func)
1522
{
1523
struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
1524
struct ath10k *ar = ar_sdio->ar;
1525
unsigned long timeout;
1526
bool done = false;
1527
int ret;
1528
1529
/* Release the host during interrupts so we can pick it back up when
1530
* we process commands.
1531
*/
1532
sdio_release_host(ar_sdio->func);
1533
1534
timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
1535
do {
1536
ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
1537
if (ret)
1538
break;
1539
} while (time_before(jiffies, timeout) && !done);
1540
1541
ath10k_mac_tx_push_pending(ar);
1542
1543
sdio_claim_host(ar_sdio->func);
1544
1545
if (ret && ret != -ECANCELED)
1546
ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
1547
ret);
1548
}
1549
1550
/* sdio HIF functions */
1551
1552
static int ath10k_sdio_disable_intrs(struct ath10k *ar)
1553
{
1554
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1555
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1556
struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1557
int ret;
1558
1559
mutex_lock(&irq_data->mtx);
1560
1561
memset(regs, 0, sizeof(*regs));
1562
ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1563
&regs->int_status_en, sizeof(*regs));
1564
if (ret)
1565
ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
1566
1567
mutex_unlock(&irq_data->mtx);
1568
1569
return ret;
1570
}
1571
1572
static int ath10k_sdio_hif_power_up(struct ath10k *ar,
1573
enum ath10k_firmware_mode fw_mode)
1574
{
1575
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1576
struct sdio_func *func = ar_sdio->func;
1577
int ret;
1578
1579
if (!ar_sdio->is_disabled)
1580
return 0;
1581
1582
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
1583
1584
ret = ath10k_sdio_config(ar);
1585
if (ret) {
1586
ath10k_err(ar, "failed to config sdio: %d\n", ret);
1587
return ret;
1588
}
1589
1590
sdio_claim_host(func);
1591
1592
ret = sdio_enable_func(func);
1593
if (ret) {
1594
ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);
1595
sdio_release_host(func);
1596
return ret;
1597
}
1598
1599
sdio_release_host(func);
1600
1601
/* Wait for hardware to initialise. It should take a lot less than
1602
* 20 ms but let's be conservative here.
1603
*/
1604
msleep(20);
1605
1606
ar_sdio->is_disabled = false;
1607
1608
ret = ath10k_sdio_disable_intrs(ar);
1609
if (ret)
1610
return ret;
1611
1612
return 0;
1613
}
1614
1615
static void ath10k_sdio_hif_power_down(struct ath10k *ar)
1616
{
1617
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1618
int ret;
1619
1620
if (ar_sdio->is_disabled)
1621
return;
1622
1623
ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
1624
1625
timer_delete_sync(&ar_sdio->sleep_timer);
1626
ath10k_sdio_set_mbox_sleep(ar, true);
1627
1628
/* Disable the card */
1629
sdio_claim_host(ar_sdio->func);
1630
1631
ret = sdio_disable_func(ar_sdio->func);
1632
if (ret) {
1633
ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
1634
sdio_release_host(ar_sdio->func);
1635
return;
1636
}
1637
1638
ret = mmc_hw_reset(ar_sdio->func->card);
1639
if (ret)
1640
ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
1641
1642
sdio_release_host(ar_sdio->func);
1643
1644
ar_sdio->is_disabled = true;
1645
}
1646
1647
static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1648
struct ath10k_hif_sg_item *items, int n_items)
1649
{
1650
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1651
enum ath10k_htc_ep_id eid;
1652
struct sk_buff *skb;
1653
int ret, i;
1654
1655
eid = pipe_id_to_eid(pipe_id);
1656
1657
for (i = 0; i < n_items; i++) {
1658
size_t padded_len;
1659
u32 address;
1660
1661
skb = items[i].transfer_context;
1662
padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
1663
skb->len);
1664
skb_trim(skb, padded_len);
1665
1666
/* Write TX data to the end of the mbox address space */
1667
address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
1668
skb->len;
1669
ret = ath10k_sdio_prep_async_req(ar, address, skb,
1670
NULL, true, eid);
1671
if (ret)
1672
return ret;
1673
}
1674
1675
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1676
1677
return 0;
1678
}
1679
1680
static int ath10k_sdio_enable_intrs(struct ath10k *ar)
1681
{
1682
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1683
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1684
struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1685
int ret;
1686
1687
mutex_lock(&irq_data->mtx);
1688
1689
/* Enable all but CPU interrupts */
1690
regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
1691
FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
1692
FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
1693
1694
/* NOTE: There are some cases where HIF can do detection of
1695
* pending mbox messages which is disabled now.
1696
*/
1697
regs->int_status_en |=
1698
FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
1699
1700
/* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #0
1701
* #0 is used to report assertions from the target
1702
*/
1703
regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
1704
1705
/* Set up the Error Interrupt status Register */
1706
regs->err_int_status_en =
1707
FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
1708
FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
1709
1710
/* Enable Counter interrupt status register to get fatal errors for
1711
* debugging.
1712
*/
1713
regs->cntr_int_status_en =
1714
FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
1715
ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
1716
1717
ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1718
&regs->int_status_en, sizeof(*regs));
1719
if (ret)
1720
ath10k_warn(ar,
1721
"failed to update mbox interrupt status register : %d\n",
1722
ret);
1723
1724
mutex_unlock(&irq_data->mtx);
1725
return ret;
1726
}
1727
1728
/* HIF diagnostics */
1729
1730
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1731
size_t buf_len)
1732
{
1733
int ret;
1734
void *mem;
1735
1736
mem = kzalloc(buf_len, GFP_KERNEL);
1737
if (!mem)
1738
return -ENOMEM;
1739
1740
/* set window register to start read cycle */
1741
ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
1742
if (ret) {
1743
ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
1744
goto out;
1745
}
1746
1747
/* read the data */
1748
ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
1749
if (ret) {
1750
ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
1751
ret);
1752
goto out;
1753
}
1754
1755
memcpy(buf, mem, buf_len);
1756
1757
out:
1758
kfree(mem);
1759
1760
return ret;
1761
}
1762
1763
static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
1764
u32 *value)
1765
{
1766
__le32 *val;
1767
int ret;
1768
1769
val = kzalloc(sizeof(*val), GFP_KERNEL);
1770
if (!val)
1771
return -ENOMEM;
1772
1773
ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
1774
if (ret)
1775
goto out;
1776
1777
*value = __le32_to_cpu(*val);
1778
1779
out:
1780
kfree(val);
1781
1782
return ret;
1783
}
1784
1785
static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
1786
const void *data, int nbytes)
1787
{
1788
int ret;
1789
1790
/* set write data */
1791
ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
1792
if (ret) {
1793
ath10k_warn(ar,
1794
"failed to write 0x%p to mbox window data address: %d\n",
1795
data, ret);
1796
return ret;
1797
}
1798
1799
/* set window register, which starts the write cycle */
1800
ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
1801
if (ret) {
1802
ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
1803
return ret;
1804
}
1805
1806
return 0;
1807
}
1808
1809
static int ath10k_sdio_hif_start_post(struct ath10k *ar)
1810
{
1811
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1812
u32 addr, val;
1813
int ret = 0;
1814
1815
addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1816
1817
ret = ath10k_sdio_diag_read32(ar, addr, &val);
1818
if (ret) {
1819
ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
1820
return ret;
1821
}
1822
1823
if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
1824
ath10k_dbg(ar, ATH10K_DBG_SDIO,
1825
"sdio mailbox swap service enabled\n");
1826
ar_sdio->swap_mbox = true;
1827
} else {
1828
ath10k_dbg(ar, ATH10K_DBG_SDIO,
1829
"sdio mailbox swap service disabled\n");
1830
ar_sdio->swap_mbox = false;
1831
}
1832
1833
ath10k_sdio_set_mbox_sleep(ar, true);
1834
1835
return 0;
1836
}
1837
1838
static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
1839
{
1840
u32 addr, val;
1841
int ret;
1842
1843
addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1844
1845
ret = ath10k_sdio_diag_read32(ar, addr, &val);
1846
if (ret) {
1847
ath10k_warn(ar,
1848
"unable to read hi_acs_flags for htt tx complete: %d\n", ret);
1849
return ret;
1850
}
1851
1852
ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
1853
1854
ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
1855
ret ? " " : " not ");
1856
1857
return ret;
1858
}
1859
1860
/* HIF start/stop */
1861
1862
static int ath10k_sdio_hif_start(struct ath10k *ar)
1863
{
1864
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1865
int ret;
1866
1867
ath10k_core_napi_enable(ar);
1868
1869
/* Sleep 20 ms before HIF interrupts are disabled.
1870
* This will give target plenty of time to process the BMI done
1871
* request before interrupts are disabled.
1872
*/
1873
msleep(20);
1874
ret = ath10k_sdio_disable_intrs(ar);
1875
if (ret)
1876
return ret;
1877
1878
/* eid 0 always uses the lower part of the extended mailbox address
1879
* space (ext_info[0].htc_ext_addr).
1880
*/
1881
ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1882
ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1883
1884
sdio_claim_host(ar_sdio->func);
1885
1886
/* Register the isr */
1887
ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
1888
if (ret) {
1889
ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
1890
sdio_release_host(ar_sdio->func);
1891
return ret;
1892
}
1893
1894
sdio_release_host(ar_sdio->func);
1895
1896
ret = ath10k_sdio_enable_intrs(ar);
1897
if (ret)
1898
ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
1899
1900
/* Enable sleep and then disable it again */
1901
ret = ath10k_sdio_set_mbox_sleep(ar, true);
1902
if (ret)
1903
return ret;
1904
1905
/* Wait for 20ms for the written value to take effect */
1906
msleep(20);
1907
1908
ret = ath10k_sdio_set_mbox_sleep(ar, false);
1909
if (ret)
1910
return ret;
1911
1912
return 0;
1913
}
1914
1915
#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
1916
1917
static void ath10k_sdio_irq_disable(struct ath10k *ar)
1918
{
1919
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1920
struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1921
struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1922
struct sk_buff *skb;
1923
struct completion irqs_disabled_comp;
1924
int ret;
1925
1926
skb = dev_alloc_skb(sizeof(*regs));
1927
if (!skb)
1928
return;
1929
1930
mutex_lock(&irq_data->mtx);
1931
1932
memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
1933
memcpy(skb->data, regs, sizeof(*regs));
1934
skb_put(skb, sizeof(*regs));
1935
1936
mutex_unlock(&irq_data->mtx);
1937
1938
init_completion(&irqs_disabled_comp);
1939
ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1940
skb, &irqs_disabled_comp, false, 0);
1941
if (ret)
1942
goto out;
1943
1944
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1945
1946
/* Wait for the completion of the IRQ disable request.
1947
* If there is a timeout we will try to disable irqs anyway.
1948
*/
1949
ret = wait_for_completion_timeout(&irqs_disabled_comp,
1950
SDIO_IRQ_DISABLE_TIMEOUT_HZ);
1951
if (!ret)
1952
ath10k_warn(ar, "sdio irq disable request timed out\n");
1953
1954
sdio_claim_host(ar_sdio->func);
1955
1956
ret = sdio_release_irq(ar_sdio->func);
1957
if (ret)
1958
ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
1959
1960
sdio_release_host(ar_sdio->func);
1961
1962
out:
1963
kfree_skb(skb);
1964
}
1965
1966
static void ath10k_sdio_hif_stop(struct ath10k *ar)
1967
{
1968
struct ath10k_sdio_bus_request *req, *tmp_req;
1969
struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1970
struct sk_buff *skb;
1971
1972
ath10k_sdio_irq_disable(ar);
1973
1974
cancel_work_sync(&ar_sdio->async_work_rx);
1975
1976
while ((skb = skb_dequeue(&ar_sdio->rx_head)))
1977
dev_kfree_skb_any(skb);
1978
1979
cancel_work_sync(&ar_sdio->wr_async_work);
1980
1981
spin_lock_bh(&ar_sdio->wr_async_lock);
1982
1983
/* Free all bus requests that have not been handled */
1984
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1985
struct ath10k_htc_ep *ep;
1986
1987
list_del(&req->list);
1988
1989
if (req->htc_msg) {
1990
ep = &ar->htc.endpoint[req->eid];
1991
ath10k_htc_notify_tx_completion(ep, req->skb);
1992
} else if (req->skb) {
1993
kfree_skb(req->skb);
1994
}
1995
ath10k_sdio_free_bus_req(ar, req);
1996
}
1997
1998
spin_unlock_bh(&ar_sdio->wr_async_lock);
1999
2000
ath10k_core_napi_sync_disable(ar);
2001
}
2002
2003
#ifdef CONFIG_PM
2004
2005
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
2006
{
2007
return 0;
2008
}
2009
2010
static int ath10k_sdio_hif_resume(struct ath10k *ar)
2011
{
2012
switch (ar->state) {
2013
case ATH10K_STATE_OFF:
2014
ath10k_dbg(ar, ATH10K_DBG_SDIO,
2015
"sdio resume configuring sdio\n");
2016
2017
/* need to set sdio settings after power is cut from sdio */
2018
ath10k_sdio_config(ar);
2019
break;
2020
2021
case ATH10K_STATE_ON:
2022
default:
2023
break;
2024
}
2025
2026
return 0;
2027
}
2028
#endif
2029
2030
static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service...
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			ep_found = true;
			break;
		}
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended
	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
	 * the lower part (ext_info[0].htc_ext_addr).
	 * If fw wants swapping of mailbox addresses, the opposite is true.
	 */
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0
	 */
	*ul_pipe = 0;
	*dl_pipe = 0;
}

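/* HIF operations registered with the ath10k core for the SDIO bus */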
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.start_post		= ath10k_sdio_hif_start_post,
	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};

#ifdef CONFIG_PM_SLEEP

/* Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath10k_sdio_pm_suspend(struct device *device)
{
	struct sdio_func *func = dev_to_sdio_func(device);
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	mmc_pm_flag_t pm_flag, pm_caps;
	int ret;

	if (!device_may_wakeup(ar->dev))
		return 0;

	ath10k_sdio_set_mbox_sleep(ar, true);

	pm_flag = MMC_PM_KEEP_POWER;

	ret = sdio_set_host_pm_flags(func, pm_flag);
	if (ret) {
		pm_caps = sdio_get_host_pm_caps(func);
		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
			    pm_flag, pm_caps, ret);
		return ret;
	}

	return ret;
}

static int ath10k_sdio_pm_resume(struct device *device)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
			 ath10k_sdio_pm_resume);

#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)

#else

#define ATH10K_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

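/* NAPI poll handler: pass received high-latency HTT indications up to the
 * HTT layer and complete NAPI when less than the budget was consumed.
 */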
static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done;

	done = ath10k_htt_rx_hl_indication(ar, budget);
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);

	if (done < budget)
		napi_complete_done(ctx, done);

	return done;
}

static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
						u32 item_offset,
						u32 *val)
{
	u32 addr;
	int ret;

	addr = host_interest_item_address(item_offset);

	ret = ath10k_sdio_diag_read32(ar, addr, val);

	if (ret)
		ath10k_warn(ar, "unable to read host interest offset %d value\n",
			    item_offset);

	return ret;
}

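/* Read a memory range from the target one 32-bit word at a time via the
 * diagnostic window.
 */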
static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
				u32 buf_len)
{
	u32 val;
	int i, ret = 0;

	for (i = 0; i < buf_len; i += 4) {
		ret = ath10k_sdio_diag_read32(ar, address + i, &val);
		if (ret) {
			ath10k_warn(ar, "unable to read mem %d value\n", address + i);
			break;
		}
		memcpy(buf + i, &val, 4);
	}

	return ret;
}

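/* Check the hi_option_flag2 host interest value to see whether the
 * firmware supports the enhanced (BMI based) crash dump path.
 */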
static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
{
	u32 param = 0;

	ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);

	return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
}

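/* Read the firmware register dump area and print it; also copy the
 * values into the crash data, if available.
 */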
static void ath10k_sdio_dump_registers(struct ath10k *ar,
				       struct ath10k_fw_crash_data *crash_data,
				       bool fast_dump)
{
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;
	u32 reg_dump_area;

	ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
						   &reg_dump_area);
	if (ret) {
		ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	if (fast_dump)
		ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
					     sizeof(reg_dump_values));
	else
		ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
					   sizeof(reg_dump_values));

	if (ret) {
		ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
		return;
	}

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	if (!crash_data)
		return;

	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
		crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
}

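/* Dump a memory region section by section, padding any gaps between
 * sections with ATH10K_MAGIC_NOT_COPIED. Returns the number of bytes
 * written to the buffer.
 */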
static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
					   const struct ath10k_mem_region *mem_region,
					   u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	/* fill the gap between the first register section and register
	 * start address
	 */
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;
	i = 0;
	for (; cur_section; cur_section = next_section) {
		section_size = cur_section->end - cur_section->start;

		if (section_size <= 0) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		if (++i == mem_region->section_table.size) {
			/* last section */
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		/* read section to dest memory */
		ret = ath10k_sdio_read_mem(ar, cur_section->start,
					   buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		/* fill in the gap between this section and the next */
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;
	}

	return count;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
					   const struct ath10k_mem_region *current_region,
					   u8 *buf,
					   bool fast_dump)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_sdio_dump_memory_section(ar,
						       current_region,
						       buf,
						       current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	if (fast_dump)
		ret = ath10k_bmi_read_memory(ar,
					     current_region->start,
					     buf,
					     current_region->len);
	else
		ret = ath10k_sdio_read_mem(ar,
					   current_region->start,
					   buf,
					   current_region->len);

	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

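/* Walk the memory layout for this hardware and dump each region into the
 * ramdump buffer, each region preceded by a dump header.
 */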
static void ath10k_sdio_dump_memory(struct ath10k *ar,
				    struct ath10k_fw_crash_data *crash_data,
				    bool fast_dump)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* Reserve space for the header. */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
						      fast_dump);
		if (ret >= 0)
			count = ret;

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			/* Note: the header remains, just with zero length. */
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}

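/* Collect a firmware crash dump (registers and memory regions) and kick
 * off core recovery.
 */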
void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];
	bool fast_dump;

	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);

	if (fast_dump)
		ath10k_bmi_start(ar);

	ar->stats.fw_crash_counter++;

	ath10k_sdio_disable_intrs(ar);

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);

	ath10k_sdio_enable_intrs(ar);

	ath10k_core_start_recovery(ar);
}

static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 dev_id_base;
	struct ath10k_bus_params bus_params = {};
	int ret, i;

	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
	 * If newer chipsets appear that do not use the hw reg setup as
	 * defined in qca6174_regs and qca6174_values, this assumption no
	 * longer holds and hw_rev must be set up differently depending on
	 * the chipset.
	 */
	hw_rev = ATH10K_HW_QCA6174;

	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->vsg_buffer) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	skb_queue_head_init(&ar_sdio->rx_head);
	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);

	dev_id_base = (id->device & 0x0F00);
	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->dev_id = QCA9377_1_0_DEVICE_ID;
	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
	/* TODO: don't know yet how to get chip_id with SDIO */
	bus_params.chip_id = 0;
	bus_params.hl_msdu_ids = true;

	ar->hw->max_mtu = ETH_DATA_LEN;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_sdio_remove(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ath10k_core_unregister(ar);

	netif_napi_del(&ar->napi);

	destroy_workqueue(ar_sdio->workqueue);

	ath10k_core_destroy(ar);
}

static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv = {
		.pm = ATH10K_SDIO_PM_OPS,
	},
};
module_sdio_driver(ath10k_sdio_driver);

MODULE_AUTHOR("Qualcomm Atheros");
2677
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
2678
MODULE_LICENSE("Dual BSD/GPL");
2679
2680