Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/bluetooth/hci_qca.c
26378 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Bluetooth Software UART Qualcomm protocol
4
*
5
* HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
6
* protocol extension to H4.
7
*
8
* Copyright (C) 2007 Texas Instruments, Inc.
9
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
10
*
11
* Acknowledgements:
12
* This file is based on hci_ll.c, which was...
13
* Written by Ohad Ben-Cohen <[email protected]>
14
* which was in turn based on hci_h4.c, which was written
15
* by Maxim Krasnyansky and Marcel Holtmann.
16
*/
17
18
#include <linux/kernel.h>
19
#include <linux/clk.h>
20
#include <linux/completion.h>
21
#include <linux/debugfs.h>
22
#include <linux/delay.h>
23
#include <linux/devcoredump.h>
24
#include <linux/device.h>
25
#include <linux/gpio/consumer.h>
26
#include <linux/mod_devicetable.h>
27
#include <linux/module.h>
28
#include <linux/of.h>
29
#include <linux/acpi.h>
30
#include <linux/platform_device.h>
31
#include <linux/pwrseq/consumer.h>
32
#include <linux/regulator/consumer.h>
33
#include <linux/serdev.h>
34
#include <linux/string_choices.h>
35
#include <linux/mutex.h>
36
#include <linux/unaligned.h>
37
38
#include <net/bluetooth/bluetooth.h>
39
#include <net/bluetooth/hci_core.h>
40
41
#include "hci_uart.h"
42
#include "btqca.h"
43
44
/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

/* Timeouts (milliseconds) used by the IBS state machine and SSR handling */
#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS	200
#define IBS_HOST_TX_IDLE_TIMEOUT_MS	2000
#define CMD_TRANS_TIMEOUT_MS		100
#define MEMDUMP_TIMEOUT_MS		8000
#define FW_DOWNLOAD_TIMEOUT_MS		3000
/* Worst-case window during which IBS stays disabled across an SSR:
 * full memdump collection plus a firmware re-download.
 * (Defined after its two operands so the whole budget reads top-down.)
 */
#define IBS_DISABLE_SSR_TIMEOUT_MS \
	(MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)

/* susclk rate */
#define SUSCLK_RATE_32KHZ	32768

/* Controller debug log header */
#define QCA_DEBUG_HANDLE	0x2EDC

/* max retry count when init fails */
#define MAX_INIT_RETRIES	3

/* Controller dump header */
#define QCA_SSR_DUMP_HANDLE		0x0108
#define QCA_DUMP_PACKET_SIZE		255
#define QCA_LAST_SEQUENCE_NUM		0xFFFF
#define QCA_CRASHBYTE_PACKET_LEN	1096
#define QCA_MEMDUMP_BYTE		0xFB
/* Atomic state bits kept in qca_data->flags (set/test/clear_bit) */
enum qca_flags {
	QCA_IBS_DISABLED,	/* in-band sleep suppressed (fw download, memdump) */
	QCA_DROP_VENDOR_EVENT,	/* drop next vendor event (baudrate change ack) */
	QCA_SUSPENDING,		/* system suspend in progress */
	QCA_MEMDUMP_COLLECTION,	/* controller memdump frames being collected */
	QCA_HW_ERROR_EVENT,	/* hardware error event handling in progress */
	QCA_SSR_TRIGGERED,	/* sub-system restart (crash recovery) triggered */
	QCA_BT_OFF,		/* controller powered off */
	QCA_ROM_FW,		/* running ROM firmware (no patch downloaded) */
	QCA_DEBUGFS_CREATED,	/* debugfs entries already created */
};

/* Optional controller capabilities advertised per SoC */
enum qca_capabilities {
	QCA_CAP_WIDEBAND_SPEECH = BIT(0),
	QCA_CAP_VALID_LE_STATES = BIT(1),
};

/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,	/* must wake device before sending */
	HCI_IBS_TX_WAKING,	/* WAKE_IND sent, waiting for WAKE_ACK */
	HCI_IBS_TX_AWAKE,	/* device awake, may transmit directly */
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,	/* only refresh on/off accounting */
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

/* Controller memory dump states */
enum qca_memdump_states {
	QCA_MEMDUMP_IDLE,
	QCA_MEMDUMP_COLLECTING,
	QCA_MEMDUMP_COLLECTED,
	QCA_MEMDUMP_TIMEOUT,
};
/* Progress bookkeeping for an in-flight controller memory dump */
struct qca_memdump_info {
	u32 current_seq_no;	/* next expected fragment sequence number */
	u32 received_dump;	/* bytes accumulated so far */
	u32 ram_dump_size;	/* total size announced in fragment 0 */
};

/* Wire header preceding each memdump fragment (little-endian seq_no) */
struct qca_memdump_event_hdr {
	__u8 evt;
	__u8 plen;
	__u16 opcode;
	__le16 seq_no;
	__u8 reserved;
} __packed;


/* Payload of memdump fragment 0: total dump size */
struct qca_dump_size {
	__le32 dump_size;
} __packed;
/* Per-instance driver state, hung off hci_uart->priv */
struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;		/* partially reassembled rx frame */
	struct sk_buff_head txq;	/* frames ready for the UART */
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue */
	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;	/* fires when tx is idle long enough to sleep */
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;	/* re-sends WAKE_IND until WAKE_ACK */
	u32 wake_retrans;
	struct workqueue_struct *workqueue;	/* ordered wq for the items below */
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	struct work_struct ctrl_memdump_evt;
	struct delayed_work ctrl_memdump_timeout;
	struct qca_memdump_info *qca_memdump;	/* non-NULL while a dump is collecting */
	unsigned long flags;		/* enum qca_flags bits */
	struct completion drop_ev_comp;	/* signalled when the dropped vendor event arrives */
	wait_queue_head_t suspend_wait_q;	/* woken on rx sleep-ind during suspend */
	enum qca_memdump_states memdump_state;
	struct mutex hci_memdump_lock;	/* serializes memdump collection vs teardown */

	u16 fw_version;
	u16 controller_id;
	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;	/* jiffies at last vote transition */
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};
/* Which UART speed to program: initial (fw download) or operational */
enum qca_speed_type {
	QCA_INIT_SPEED = 1,
	QCA_OPER_SPEED
};

/*
 * Voltage regulator information required for configuring the
 * QCA Bluetooth chipset
 */
struct qca_vreg {
	const char *name;
	unsigned int load_uA;	/* load current to request, in microamps */
};

/* Static per-SoC description matched from DT/ACPI */
struct qca_device_data {
	enum qca_btsoc_type soc_type;
	struct qca_vreg *vregs;
	size_t num_vregs;
	uint32_t capabilities;	/* enum qca_capabilities bitmask */
};

/*
 * Platform data for the QCA Bluetooth power driver.
 */
struct qca_power {
	struct device *dev;
	struct regulator_bulk_data *vreg_bulk;
	int num_vregs;
	bool vregs_on;		/* regulators currently enabled */
	struct pwrseq_desc *pwrseq;	/* optional external power sequencer */
};

/* Per-serdev driver data */
struct qca_serdev {
	struct hci_uart serdev_hu;
	struct gpio_desc *bt_en;	/* BT enable GPIO, may be absent */
	struct gpio_desc *sw_ctrl;	/* switch-control GPIO, may be absent */
	struct clk *susclk;		/* 32.768 kHz sleep clock */
	enum qca_btsoc_type btsoc_type;
	struct qca_power *bt_power;
	u32 init_speed;
	u32 oper_speed;
	bool bdaddr_property_broken;
	const char *firmware_name[2];	/* [0]=firmware, [1]=rampatch override */
};
235
static int qca_regulator_enable(struct qca_serdev *qcadev);
236
static void qca_regulator_disable(struct qca_serdev *qcadev);
237
static void qca_power_shutdown(struct hci_uart *hu);
238
static int qca_power_off(struct hci_dev *hdev);
239
static void qca_controller_memdump(struct work_struct *work);
240
static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb);
241
242
static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
243
{
244
enum qca_btsoc_type soc_type;
245
246
if (hu->serdev) {
247
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
248
249
soc_type = qsd->btsoc_type;
250
} else {
251
soc_type = QCA_ROME;
252
}
253
254
return soc_type;
255
}
256
257
static const char *qca_get_firmware_name(struct hci_uart *hu)
258
{
259
if (hu->serdev) {
260
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
261
262
return qsd->firmware_name[0];
263
} else {
264
return NULL;
265
}
266
}
267
268
static const char *qca_get_rampatch_name(struct hci_uart *hu)
269
{
270
if (hu->serdev) {
271
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
272
273
return qsd->firmware_name[1];
274
} else {
275
return NULL;
276
}
277
}
278
279
/* Hook invoked when the aggregate clock vote turns on; intentionally empty. */
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to enable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock here if needed
	 */
}
/* Hook invoked when the aggregate clock vote turns off; intentionally empty. */
static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to disable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock off here if needed
	 */
}
/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	/* The UART clock must stay on while either direction votes for it */
	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		/* Pure accounting refresh: attribute the elapsed time to the
		 * current state without changing any vote.
		 * NOTE(review): this adds to vote_off_ms while the clock is ON
		 * (and vice versa), which looks inverted relative to the
		 * transition accounting below — verify against intended
		 * debugfs semantics before relying on these counters.
		 */
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	new_vote = qca->rx_vote | qca->tx_vote;

	/* Only act (and account) on aggregate on<->off transitions */
	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
		       str_true_false(vote));

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;	/* time just spent off */
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;	/* time just spent on */
		}
		qca->vote_last_jif = jiffies;
	}
}
/* Builds and sends an HCI_IBS command packet.
364
* These are very simple packets with only 1 cmd byte.
365
*/
366
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
367
{
368
int err = 0;
369
struct sk_buff *skb = NULL;
370
struct qca_data *qca = hu->priv;
371
372
BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
373
374
skb = bt_skb_alloc(1, GFP_ATOMIC);
375
if (!skb) {
376
BT_ERR("Failed to allocate memory for HCI_IBS packet");
377
return -ENOMEM;
378
}
379
380
/* Assign HCI_IBS type */
381
skb_put_u8(skb, cmd);
382
383
skb_queue_tail(&qca->txq, skb);
384
385
return err;
386
}
387
388
/* Workqueue item: wake the controller for transmit.
 * Votes the serial clock on, sends a WAKE_IND and arms the retransmit
 * timer in case no WAKE_ACK arrives, then kicks the uart tx path.
 */
static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;
	unsigned long flags;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Workqueue item: complete a device-initiated wake on the receive side.
 * Votes the rx clock on, marks rx awake and acknowledges the device's
 * WAKE_IND with a WAKE_ACK.
 */
static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
448
{
449
struct qca_data *qca = container_of(work, struct qca_data,
450
ws_rx_vote_off);
451
struct hci_uart *hu = qca->hu;
452
453
BT_DBG("hu %p rx clock vote off", hu);
454
455
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
456
}
457
458
/* Workqueue item: flush pending tx, then drop the transmit-side serial
 * clock vote. The ordering is deliberate — see the comment below.
 */
static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that message queued to tty driver, vote for tty clocks off.
	 * It is up to the tty driver to pend the clocks off until tx done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
/* Timer callback: tx has been idle long enough — tell the controller we
 * are going to sleep (SLEEP_IND) and schedule the tx clock vote off.
 */
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
	struct qca_data *qca = timer_container_of(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	/* Nested class: may be taken while another hci_ibs_lock is held */
	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		/* Clock vote change must not run under the spinlock */
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
509
{
510
struct qca_data *qca = timer_container_of(qca, t, wake_retrans_timer);
511
struct hci_uart *hu = qca->hu;
512
unsigned long flags, retrans_delay;
513
bool retransmit = false;
514
515
BT_DBG("hu %p wake retransmit timeout in %d state",
516
hu, qca->tx_ibs_state);
517
518
spin_lock_irqsave_nested(&qca->hci_ibs_lock,
519
flags, SINGLE_DEPTH_NESTING);
520
521
/* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
522
if (test_bit(QCA_SUSPENDING, &qca->flags)) {
523
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
524
return;
525
}
526
527
switch (qca->tx_ibs_state) {
528
case HCI_IBS_TX_WAKING:
529
/* No WAKE_ACK, retransmit WAKE */
530
retransmit = true;
531
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
532
BT_ERR("Failed to acknowledge device wake up");
533
break;
534
}
535
qca->ibs_sent_wakes++;
536
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
537
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
538
break;
539
540
case HCI_IBS_TX_ASLEEP:
541
case HCI_IBS_TX_AWAKE:
542
default:
543
BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
544
break;
545
}
546
547
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
548
549
if (retransmit)
550
hci_uart_tx_wakeup(hu);
551
}
552
553
554
/* Delayed work: the controller failed to deliver the full memdump within
 * MEMDUMP_TIMEOUT_MS. Mark the dump timed out and, unless a hardware
 * error event is already driving recovery, inject a reset.
 */
static void qca_controller_memdump_timeout(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_timeout.work);
	struct hci_uart *hu = qca->hu;

	mutex_lock(&qca->hci_memdump_lock);
	if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}

	mutex_unlock(&qca->hci_memdump_lock);
}
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	/* IBS flow depends on hardware flow control being available */
	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	qca = kzalloc(sizeof(*qca), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	skb_queue_head_init(&qca->rx_memdump_q);
	spin_lock_init(&qca->hci_ibs_lock);
	mutex_init(&qca->hci_memdump_lock);
	/* Ordered wq: IBS work items must not run concurrently */
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
	INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
			  qca_controller_memdump_timeout);
	init_waitqueue_head(&qca->suspend_wait_q);

	qca->hu = hu;
	init_completion(&qca->drop_ev_comp);

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	qca->vote_last_jif = jiffies;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);

		/* These SoCs require a specific initial baudrate */
		switch (qcadev->btsoc_type) {
		case QCA_WCN3950:
		case QCA_WCN3988:
		case QCA_WCN3990:
		case QCA_WCN3991:
		case QCA_WCN3998:
		case QCA_WCN6750:
			hu->init_speed = qcadev->init_speed;
			break;

		default:
			break;
		}

		if (qcadev->oper_speed)
			hu->oper_speed = qcadev->oper_speed;
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}
/* Create the "ibs" debugfs directory exposing the IBS state machine and
 * vote statistics. Idempotent via the QCA_DEBUGFS_CREATED flag.
 */
static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	/* Only create the entries once per device lifetime */
	if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = 0444;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = 0644;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}
/* Flush protocol data */
705
static int qca_flush(struct hci_uart *hu)
706
{
707
struct qca_data *qca = hu->priv;
708
709
BT_DBG("hu %p qca flush", hu);
710
711
skb_queue_purge(&qca->tx_wait_q);
712
skb_queue_purge(&qca->txq);
713
714
return 0;
715
}
716
717
/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	/* Fold the final interval into the on/off statistics */
	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	skb_queue_purge(&qca->rx_memdump_q);
	/*
	 * Shut the timers down so they can't be rearmed when
	 * destroy_workqueue() drains pending work which in turn might try
	 * to arm a timer. After shutdown rearm attempts are silently
	 * ignored by the timer core code.
	 */
	timer_shutdown_sync(&qca->tx_idle_timer);
	timer_shutdown_sync(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	/* Any partially reassembled rx frame */
	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}
/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	/* Don't wake the rx up when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure clock is on - we may have turned clock off since
		 * receiving the wake up indicator awake rx clock.
		 */
		/* The ack is sent from the work item, after the clock vote */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		/* Already asleep - nothing to do */
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	/* A suspend path may be waiting for the device to go to sleep */
	wake_up_interruptible(&qca->suspend_wait_q);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	/* Don't react to the wake-up-acknowledgment when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets: move them to the live tx queue */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		timer_delete(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}
/* Enqueue frame for transmission (padding, crc, etc) may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
		/* As SSR is in progress, ignore the packets */
		bt_dev_dbg(hu->hdev, "SSR is in progress");
		kfree_skb(skb);
		return 0;
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Don't go to sleep in middle of patch download or
	 * Out-Of-Band(GPIOs control) sleep is selected.
	 * Don't wake the device up when suspending.
	 */
	if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
	    test_bit(QCA_SUSPENDING, &qca->flags)) {
		/* IBS bypass: send directly without the wake handshake */
		skb_queue_tail(&qca->txq, skb);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return 0;
	}

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		/* Push the idle timeout out since there is fresh traffic */
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		/* IRQ-safe free: we hold a spinlock with irqs off */
		dev_kfree_skb_irq(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}
static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
961
{
962
struct hci_uart *hu = hci_get_drvdata(hdev);
963
964
BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
965
966
device_want_to_sleep(hu);
967
968
kfree_skb(skb);
969
return 0;
970
}
971
972
static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
973
{
974
struct hci_uart *hu = hci_get_drvdata(hdev);
975
976
BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
977
978
device_want_to_wakeup(hu);
979
980
kfree_skb(skb);
981
return 0;
982
}
983
984
static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
985
{
986
struct hci_uart *hu = hci_get_drvdata(hdev);
987
988
BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
989
990
device_woke_up(hu);
991
992
kfree_skb(skb);
993
return 0;
994
}
995
996
static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
997
{
998
/* We receive debug logs from chip as an ACL packets.
999
* Instead of sending the data to ACL to decode the
1000
* received data, we are pushing them to the above layers
1001
* as a diagnostic packet.
1002
*/
1003
if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
1004
return hci_recv_diag(hdev, skb);
1005
1006
return hci_recv_frame(hdev, skb);
1007
}
1008
1009
static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
1010
{
1011
struct hci_uart *hu = hci_get_drvdata(hdev);
1012
struct qca_data *qca = hu->priv;
1013
char buf[80];
1014
1015
snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n",
1016
qca->controller_id);
1017
skb_put_data(skb, buf, strlen(buf));
1018
1019
snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n",
1020
qca->fw_version);
1021
skb_put_data(skb, buf, strlen(buf));
1022
1023
snprintf(buf, sizeof(buf), "Vendor:Qualcomm\n");
1024
skb_put_data(skb, buf, strlen(buf));
1025
1026
snprintf(buf, sizeof(buf), "Driver: %s\n",
1027
hu->serdev->dev.driver->name);
1028
skb_put_data(skb, buf, strlen(buf));
1029
}
1030
1031
static void qca_controller_memdump(struct work_struct *work)
1032
{
1033
struct qca_data *qca = container_of(work, struct qca_data,
1034
ctrl_memdump_evt);
1035
struct hci_uart *hu = qca->hu;
1036
struct sk_buff *skb;
1037
struct qca_memdump_event_hdr *cmd_hdr;
1038
struct qca_memdump_info *qca_memdump = qca->qca_memdump;
1039
struct qca_dump_size *dump;
1040
u16 seq_no;
1041
u32 rx_size;
1042
int ret = 0;
1043
enum qca_btsoc_type soc_type = qca_soc_type(hu);
1044
1045
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
1046
1047
mutex_lock(&qca->hci_memdump_lock);
1048
/* Skip processing the received packets if timeout detected
1049
* or memdump collection completed.
1050
*/
1051
if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
1052
qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
1053
mutex_unlock(&qca->hci_memdump_lock);
1054
return;
1055
}
1056
1057
if (!qca_memdump) {
1058
qca_memdump = kzalloc(sizeof(*qca_memdump), GFP_ATOMIC);
1059
if (!qca_memdump) {
1060
mutex_unlock(&qca->hci_memdump_lock);
1061
return;
1062
}
1063
1064
qca->qca_memdump = qca_memdump;
1065
}
1066
1067
qca->memdump_state = QCA_MEMDUMP_COLLECTING;
1068
cmd_hdr = (void *) skb->data;
1069
seq_no = __le16_to_cpu(cmd_hdr->seq_no);
1070
skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
1071
1072
if (!seq_no) {
1073
1074
/* This is the first frame of memdump packet from
1075
* the controller, Disable IBS to receive dump
1076
* with out any interruption, ideally time required for
1077
* the controller to send the dump is 8 seconds. let us
1078
* start timer to handle this asynchronous activity.
1079
*/
1080
set_bit(QCA_IBS_DISABLED, &qca->flags);
1081
set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1082
dump = (void *) skb->data;
1083
qca_memdump->ram_dump_size = __le32_to_cpu(dump->dump_size);
1084
if (!(qca_memdump->ram_dump_size)) {
1085
bt_dev_err(hu->hdev, "Rx invalid memdump size");
1086
kfree(qca_memdump);
1087
kfree_skb(skb);
1088
mutex_unlock(&qca->hci_memdump_lock);
1089
return;
1090
}
1091
1092
queue_delayed_work(qca->workqueue,
1093
&qca->ctrl_memdump_timeout,
1094
msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
1095
skb_pull(skb, sizeof(qca_memdump->ram_dump_size));
1096
qca_memdump->current_seq_no = 0;
1097
qca_memdump->received_dump = 0;
1098
ret = hci_devcd_init(hu->hdev, qca_memdump->ram_dump_size);
1099
bt_dev_info(hu->hdev, "hci_devcd_init Return:%d",
1100
ret);
1101
if (ret < 0) {
1102
kfree(qca->qca_memdump);
1103
qca->qca_memdump = NULL;
1104
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
1105
cancel_delayed_work(&qca->ctrl_memdump_timeout);
1106
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1107
clear_bit(QCA_IBS_DISABLED, &qca->flags);
1108
mutex_unlock(&qca->hci_memdump_lock);
1109
return;
1110
}
1111
1112
bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
1113
qca_memdump->ram_dump_size);
1114
1115
}
1116
1117
/* If sequence no 0 is missed then there is no point in
1118
* accepting the other sequences.
1119
*/
1120
if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
1121
bt_dev_err(hu->hdev, "QCA: Discarding other packets");
1122
kfree(qca_memdump);
1123
kfree_skb(skb);
1124
mutex_unlock(&qca->hci_memdump_lock);
1125
return;
1126
}
1127
/* There could be chance of missing some packets from
1128
* the controller. In such cases let us store the dummy
1129
* packets in the buffer.
1130
*/
1131
/* For QCA6390, controller does not lost packets but
1132
* sequence number field of packet sometimes has error
1133
* bits, so skip this checking for missing packet.
1134
*/
1135
while ((seq_no > qca_memdump->current_seq_no + 1) &&
1136
(soc_type != QCA_QCA6390) &&
1137
seq_no != QCA_LAST_SEQUENCE_NUM) {
1138
bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
1139
qca_memdump->current_seq_no);
1140
rx_size = qca_memdump->received_dump;
1141
rx_size += QCA_DUMP_PACKET_SIZE;
1142
if (rx_size > qca_memdump->ram_dump_size) {
1143
bt_dev_err(hu->hdev,
1144
"QCA memdump received %d, no space for missed packet",
1145
qca_memdump->received_dump);
1146
break;
1147
}
1148
hci_devcd_append_pattern(hu->hdev, 0x00,
1149
QCA_DUMP_PACKET_SIZE);
1150
qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
1151
qca_memdump->current_seq_no++;
1152
}
1153
1154
rx_size = qca_memdump->received_dump + skb->len;
1155
if (rx_size <= qca_memdump->ram_dump_size) {
1156
if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
1157
(seq_no != qca_memdump->current_seq_no)) {
1158
bt_dev_err(hu->hdev,
1159
"QCA memdump unexpected packet %d",
1160
seq_no);
1161
}
1162
bt_dev_dbg(hu->hdev,
1163
"QCA memdump packet %d with length %d",
1164
seq_no, skb->len);
1165
hci_devcd_append(hu->hdev, skb);
1166
qca_memdump->current_seq_no += 1;
1167
qca_memdump->received_dump = rx_size;
1168
} else {
1169
bt_dev_err(hu->hdev,
1170
"QCA memdump received no space for packet %d",
1171
qca_memdump->current_seq_no);
1172
}
1173
1174
if (seq_no == QCA_LAST_SEQUENCE_NUM) {
1175
bt_dev_info(hu->hdev,
1176
"QCA memdump Done, received %d, total %d",
1177
qca_memdump->received_dump,
1178
qca_memdump->ram_dump_size);
1179
hci_devcd_complete(hu->hdev);
1180
cancel_delayed_work(&qca->ctrl_memdump_timeout);
1181
kfree(qca->qca_memdump);
1182
qca->qca_memdump = NULL;
1183
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
1184
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1185
}
1186
1187
mutex_unlock(&qca->hci_memdump_lock);
1188
}
1189
1190
}
1191
1192
/* Queue one controller memdump event packet for deferred processing.
 *
 * Called from the RX path; the actual parsing happens in the
 * ctrl_memdump_evt worker so the receive path is never blocked.
 */
static int qca_controller_memdump_event(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	/* Flag that a subsystem restart is in progress before the worker
	 * starts consuming the dump fragments.
	 */
	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
	skb_queue_tail(&qca->rx_memdump_q, skb);
	queue_work(qca->workqueue, &qca->ctrl_memdump_evt);

	return 0;
}
/* HCI event receive hook.
 *
 * Intercepts two classes of vendor events before they reach the HCI
 * core: the baudrate-change confirmation (dropped, see below) and
 * controller memory-dump fragments (routed to the memdump handler).
 * Everything else is passed to hci_recv_frame().
 */
static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
		struct hci_event_hdr *hdr = (void *)skb->data;

		/* For the WCN3990 the vendor command for a baudrate change
		 * isn't sent as synchronous HCI command, because the
		 * controller sends the corresponding vendor event with the
		 * new baudrate. The event is received and properly decoded
		 * after changing the baudrate of the host port. It needs to
		 * be dropped, otherwise it can be misinterpreted as
		 * response to a later firmware download command (also a
		 * vendor command).
		 */

		if (hdr->evt == HCI_EV_VENDOR)
			complete(&qca->drop_ev_comp);

		/* The event is consumed here, never forwarded. */
		kfree_skb(skb);

		return 0;
	}
	/* We receive chip memory dump as an event packet, With a dedicated
	 * handler followed by a hardware error event. When this event is
	 * received we store dump into a file before closing hci. This
	 * dump will help in triaging the issues.
	 */
	if ((skb->data[0] == HCI_VENDOR_PKT) &&
	    (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
		return qca_controller_memdump_event(hdev, skb);

	return hci_recv_frame(hdev, skb);
}
/* h4_recv_pkt descriptors for the three HCI_IBS messages.  Each IBS
 * message is matched on its type byte alone: no header, no length
 * field (hlen/loff/lsize all zero).
 */
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE
/* Packet table used by h4_recv_buf() in qca_recv(): standard H4 packet
 * types plus the three single-byte IBS power-management messages.
 */
static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = qca_recv_acl_data },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = qca_recv_event    },
	{ H4_RECV_ISO,             .recv = hci_recv_frame    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};
static int qca_recv(struct hci_uart *hu, const void *data, int count)
1274
{
1275
struct qca_data *qca = hu->priv;
1276
1277
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
1278
return -EUNATCH;
1279
1280
qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
1281
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
1282
if (IS_ERR(qca->rx_skb)) {
1283
int err = PTR_ERR(qca->rx_skb);
1284
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
1285
qca->rx_skb = NULL;
1286
return err;
1287
}
1288
1289
return count;
1290
}
1291
1292
static struct sk_buff *qca_dequeue(struct hci_uart *hu)
1293
{
1294
struct qca_data *qca = hu->priv;
1295
1296
return skb_dequeue(&qca->txq);
1297
}
1298
1299
static uint8_t qca_get_baudrate_value(int speed)
1300
{
1301
switch (speed) {
1302
case 9600:
1303
return QCA_BAUDRATE_9600;
1304
case 19200:
1305
return QCA_BAUDRATE_19200;
1306
case 38400:
1307
return QCA_BAUDRATE_38400;
1308
case 57600:
1309
return QCA_BAUDRATE_57600;
1310
case 115200:
1311
return QCA_BAUDRATE_115200;
1312
case 230400:
1313
return QCA_BAUDRATE_230400;
1314
case 460800:
1315
return QCA_BAUDRATE_460800;
1316
case 500000:
1317
return QCA_BAUDRATE_500000;
1318
case 921600:
1319
return QCA_BAUDRATE_921600;
1320
case 1000000:
1321
return QCA_BAUDRATE_1000000;
1322
case 2000000:
1323
return QCA_BAUDRATE_2000000;
1324
case 3000000:
1325
return QCA_BAUDRATE_3000000;
1326
case 3200000:
1327
return QCA_BAUDRATE_3200000;
1328
case 3500000:
1329
return QCA_BAUDRATE_3500000;
1330
default:
1331
return QCA_BAUDRATE_115200;
1332
}
1333
}
1334
1335
/* Ask the controller to switch its UART baudrate.
 *
 * Queues the vendor baudrate-change command (the byte layout is an
 * H4-framed command: 0x01, opcode 0xfc48, one parameter byte carrying
 * the QCA baudrate code), waits until it has physically left the UART,
 * then gives the controller time to apply it.  Returns 0 on success,
 * -EINVAL for an out-of-range code, -ENOMEM on allocation failure.
 */
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3200000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to allocate baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	skb_put_data(skb, cmd, sizeof(cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait for the baudrate change request to be sent */

	while (!skb_queue_empty(&qca->txq))
		usleep_range(100, 200);

	/* Also drain the serdev FIFO so the last byte is on the wire
	 * before anyone reprograms the host-side baudrate.
	 */
	if (hu->serdev)
		serdev_device_wait_until_sent(hu->serdev,
		      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));

	/* Give the controller time to process the request */
	switch (qca_soc_type(hu)) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		usleep_range(1000, 10000);
		break;

	default:
		msleep(300);
	}

	return 0;
}
static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
1390
{
1391
if (hu->serdev)
1392
serdev_device_set_baudrate(hu->serdev, speed);
1393
else
1394
hci_uart_set_baudrate(hu, speed);
1395
}
1396
1397
/* Send a single-byte power on/off pulse to the SoC at the current UART
 * speed.  Returns 0 on success or the negative errno from the serdev
 * write.
 */
static int qca_send_power_pulse(struct hci_uart *hu, bool on)
{
	int ret;
	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
	u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;

	/* These power pulses are single byte command which are sent
	 * at required baudrate to wcn3990. On wcn3990, we have an external
	 * circuit at Tx pin which decodes the pulse sent at specific baudrate.
	 * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT
	 * and also we use the same power inputs to turn on and off for
	 * Wi-Fi/BT. Powering up the power sources will not enable BT, until
	 * we send a power on pulse at 115200 bps. This algorithm will help to
	 * save power. Disabling hardware flow control is mandatory while
	 * sending power pulses to SoC.
	 */
	bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);

	serdev_device_write_flush(hu->serdev);
	hci_uart_set_flow_control(hu, true);
	ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
	if (ret < 0) {
		bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
		return ret;
	}

	/* Make sure the pulse byte actually left the FIFO before
	 * re-enabling flow control.
	 */
	serdev_device_wait_until_sent(hu->serdev, timeout);
	hci_uart_set_flow_control(hu, false);

	/* Give to controller time to boot/shutdown */
	if (on)
		msleep(100);
	else
		usleep_range(1000, 10000);

	return 0;
}
static unsigned int qca_get_speed(struct hci_uart *hu,
1436
enum qca_speed_type speed_type)
1437
{
1438
unsigned int speed = 0;
1439
1440
if (speed_type == QCA_INIT_SPEED) {
1441
if (hu->init_speed)
1442
speed = hu->init_speed;
1443
else if (hu->proto->init_speed)
1444
speed = hu->proto->init_speed;
1445
} else {
1446
if (hu->oper_speed)
1447
speed = hu->oper_speed;
1448
else if (hu->proto->oper_speed)
1449
speed = hu->proto->oper_speed;
1450
}
1451
1452
return speed;
1453
}
1454
1455
static int qca_check_speeds(struct hci_uart *hu)
1456
{
1457
switch (qca_soc_type(hu)) {
1458
case QCA_WCN3950:
1459
case QCA_WCN3988:
1460
case QCA_WCN3990:
1461
case QCA_WCN3991:
1462
case QCA_WCN3998:
1463
case QCA_WCN6750:
1464
case QCA_WCN6855:
1465
case QCA_WCN7850:
1466
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
1467
!qca_get_speed(hu, QCA_OPER_SPEED))
1468
return -EINVAL;
1469
break;
1470
1471
default:
1472
if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
1473
!qca_get_speed(hu, QCA_OPER_SPEED))
1474
return -EINVAL;
1475
}
1476
1477
return 0;
1478
}
1479
1480
/* Switch host and controller to the requested speed class.
 *
 * QCA_INIT_SPEED only reprograms the host UART.  QCA_OPER_SPEED runs
 * the full handshake: flow control off (wcn-family), vendor baudrate
 * command to the controller, host UART reprogram, flow control back on,
 * and for WCN3990 a wait for the confirmation vendor event (which
 * qca_recv_event() drops and signals via drop_ev_comp).
 */
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
	unsigned int speed, qca_baudrate;
	struct qca_data *qca = hu->priv;
	int ret = 0;

	if (speed_type == QCA_INIT_SPEED) {
		speed = qca_get_speed(hu, QCA_INIT_SPEED);
		if (speed)
			host_set_baudrate(hu, speed);
	} else {
		enum qca_btsoc_type soc_type = qca_soc_type(hu);

		speed = qca_get_speed(hu, QCA_OPER_SPEED);
		if (!speed)
			return 0;

		/* Disable flow control for wcn3990 to deassert RTS while
		 * changing the baudrate of chip and host.
		 */
		switch (soc_type) {
		case QCA_WCN3950:
		case QCA_WCN3988:
		case QCA_WCN3990:
		case QCA_WCN3991:
		case QCA_WCN3998:
		case QCA_WCN6750:
		case QCA_WCN6855:
		case QCA_WCN7850:
			hci_uart_set_flow_control(hu, true);
			break;

		default:
			break;
		}

		switch (soc_type) {
		case QCA_WCN3990:
			/* Arm the drop-vendor-event filter before sending
			 * the command so the confirmation event cannot race
			 * past us.
			 */
			reinit_completion(&qca->drop_ev_comp);
			set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
			break;

		default:
			break;
		}

		qca_baudrate = qca_get_baudrate_value(speed);
		bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
		ret = qca_set_baudrate(hu->hdev, qca_baudrate);
		if (ret)
			goto error;

		host_set_baudrate(hu, speed);

error:
		/* Restore flow control even when the baudrate command
		 * failed.
		 */
		switch (soc_type) {
		case QCA_WCN3950:
		case QCA_WCN3988:
		case QCA_WCN3990:
		case QCA_WCN3991:
		case QCA_WCN3998:
		case QCA_WCN6750:
		case QCA_WCN6855:
		case QCA_WCN7850:
			hci_uart_set_flow_control(hu, false);
			break;

		default:
			break;
		}

		switch (soc_type) {
		case QCA_WCN3990:
			/* Wait for the controller to send the vendor event
			 * for the baudrate change command.
			 */
			if (!wait_for_completion_timeout(&qca->drop_ev_comp,
							 msecs_to_jiffies(100))) {
				bt_dev_err(hu->hdev,
					   "Failed to change controller baudrate\n");
				ret = -ETIMEDOUT;
			}

			clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
			break;

		default:
			break;
		}
	}

	return ret;
}
static int qca_send_crashbuffer(struct hci_uart *hu)
1575
{
1576
struct qca_data *qca = hu->priv;
1577
struct sk_buff *skb;
1578
1579
skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
1580
if (!skb) {
1581
bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
1582
return -ENOMEM;
1583
}
1584
1585
/* We forcefully crash the controller, by sending 0xfb byte for
1586
* 1024 times. We also might have chance of losing data, To be
1587
* on safer side we send 1096 bytes to the SoC.
1588
*/
1589
memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
1590
QCA_CRASHBYTE_PACKET_LEN);
1591
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
1592
bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
1593
skb_queue_tail(&qca->txq, skb);
1594
hci_uart_tx_wakeup(hu);
1595
1596
return 0;
1597
}
1598
1599
/* Block until the memdump worker clears QCA_MEMDUMP_COLLECTION or the
 * 8 s dump timeout elapses; ensure the bit is down either way.
 */
static void qca_wait_for_dump_collection(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
			    TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);

	clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
}
/* hdev->hw_error hook: collect (or wait for) a controller memory dump
 * before the core tears the device down.
 */
static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
	set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
	bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);

	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		/* If hardware error event received for other than QCA
		 * soc memory dump event, then we need to crash the SOC
		 * and wait here for 8 seconds to get the dump packets.
		 * This will block main thread to be on hold until we
		 * collect dump.
		 */
		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		qca_send_crashbuffer(hu);
		qca_wait_for_dump_collection(hdev);
	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
		/* Let us wait here until memory dump collected or
		 * memory dump timer expired.
		 */
		bt_dev_info(hdev, "waiting for dump to complete");
		qca_wait_for_dump_collection(hdev);
	}

	/* hci_memdump_lock serialises state/buffer access with the
	 * memdump worker.
	 */
	mutex_lock(&qca->hci_memdump_lock);
	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
		bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
		hci_devcd_abort(hu->hdev);
		if (qca->qca_memdump) {
			kfree(qca->qca_memdump);
			qca->qca_memdump = NULL;
		}
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		cancel_delayed_work(&qca->ctrl_memdump_timeout);
	}
	mutex_unlock(&qca->hci_memdump_lock);

	if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
	    qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
		cancel_work_sync(&qca->ctrl_memdump_evt);
		skb_queue_purge(&qca->rx_memdump_q);
	}

	clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
/* hdev->reset hook: trigger a dump collection and, if it times out
 * without a concurrent hw_error, inject a reset to recover the device.
 */
static void qca_reset(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	set_bit(QCA_SSR_TRIGGERED, &qca->flags);
	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		qca_send_crashbuffer(hu);
		qca_wait_for_dump_collection(hdev);
	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
		/* Let us wait here until memory dump collected or
		 * memory dump timer expired.
		 */
		bt_dev_info(hdev, "waiting for dump to complete");
		qca_wait_for_dump_collection(hdev);
	}

	mutex_lock(&qca->hci_memdump_lock);
	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
			/* Inject hw error event to reset the device
			 * and driver.
			 */
			hci_reset_dev(hu->hdev);
		}
	}
	mutex_unlock(&qca->hci_memdump_lock);
}
static bool qca_wakeup(struct hci_dev *hdev)
1691
{
1692
struct hci_uart *hu = hci_get_drvdata(hdev);
1693
bool wakeup;
1694
1695
if (!hu->serdev)
1696
return true;
1697
1698
/* BT SoC attached through the serial bus is handled by the serdev driver.
1699
* So we need to use the device handle of the serdev driver to get the
1700
* status of device may wakeup.
1701
*/
1702
wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
1703
bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
1704
1705
return wakeup;
1706
}
1707
1708
static int qca_port_reopen(struct hci_uart *hu)
1709
{
1710
int ret;
1711
1712
/* Now the device is in ready state to communicate with host.
1713
* To sync host with device we need to reopen port.
1714
* Without this, we will have RTS and CTS synchronization
1715
* issues.
1716
*/
1717
serdev_device_close(hu->serdev);
1718
ret = serdev_device_open(hu->serdev);
1719
if (ret) {
1720
bt_dev_err(hu->hdev, "failed to open port");
1721
return ret;
1722
}
1723
1724
hci_uart_set_flow_control(hu, false);
1725
1726
return 0;
1727
}
1728
1729
/* Bring the regulator-powered SoCs into a known-good booted state:
 * enable the supplies if they were turned off, force wcn399x-family
 * parts through a power-off/power-on pulse cycle, toggle bt_en where
 * present, and finally reopen the port to resync RTS/CTS.
 */
static int qca_regulator_init(struct hci_uart *hu)
{
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	struct qca_serdev *qcadev;
	int ret;
	bool sw_ctrl_state;

	/* Check for vregs status, may be hci down has turned
	 * off the voltage regulator.
	 */
	qcadev = serdev_device_get_drvdata(hu->serdev);

	if (!qcadev->bt_power->vregs_on) {
		/* Close the port while the supplies come up, then reopen
		 * it so the UART state matches the freshly powered SoC.
		 */
		serdev_device_close(hu->serdev);
		ret = qca_regulator_enable(qcadev);
		if (ret)
			return ret;

		ret = serdev_device_open(hu->serdev);
		if (ret) {
			bt_dev_err(hu->hdev, "failed to open port");
			return ret;
		}
	}

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		/* Forcefully enable wcn399x to enter in to boot mode. */
		host_set_baudrate(hu, 2400);
		ret = qca_send_power_pulse(hu, false);
		if (ret)
			return ret;
		break;

	default:
		break;
	}

	/* For wcn6750 need to enable gpio bt_en */
	if (qcadev->bt_en) {
		gpiod_set_value_cansleep(qcadev->bt_en, 0);
		msleep(50);
		gpiod_set_value_cansleep(qcadev->bt_en, 1);
		msleep(50);
		if (qcadev->sw_ctrl) {
			sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
			bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
		}
	}

	qca_set_speed(hu, QCA_INIT_SPEED);

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		/* Power-on pulse at the init baudrate boots the SoC. */
		ret = qca_send_power_pulse(hu, true);
		if (ret)
			return ret;
		break;

	default:
		break;
	}

	return qca_port_reopen(hu);
}
/* Power up the controller before setup.  Regulator-managed SoCs go
 * through qca_regulator_init(); GPIO-only boards simply assert bt_en.
 * Returns 0 on success or a negative errno.
 */
static int qca_power_on(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	struct qca_serdev *qcadev;
	struct qca_data *qca = hu->priv;
	int ret = 0;

	/* Non-serdev device usually is powered by external power
	 * and don't need additional action in driver for power on
	 */
	if (!hu->serdev)
		return 0;

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
	case QCA_QCA6390:
		ret = qca_regulator_init(hu);
		break;

	default:
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->bt_en) {
			gpiod_set_value_cansleep(qcadev->bt_en, 1);
			/* Controller needs time to bootup. */
			msleep(150);
		}
	}

	clear_bit(QCA_BT_OFF, &qca->flags);
	return ret;
}
static void hci_coredump_qca(struct hci_dev *hdev)
1844
{
1845
int err;
1846
static const u8 param[] = { 0x26 };
1847
1848
err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
1849
if (err < 0)
1850
bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
1851
}
1852
1853
/* Report the non-HCI audio data path id for offloaded HFP. */
static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
{
	/* QCA uses 1 as non-HCI data path id for HFP */
	*data_path_id = 1;

	return 0;
}
static int qca_configure_hfp_offload(struct hci_dev *hdev)
1861
{
1862
bt_dev_info(hdev, "HFP non-HCI data transport is supported");
1863
hdev->get_data_path_id = qca_get_data_path_id;
1864
/* Do not need to send HCI_Configure_Data_Path to configure non-HCI
1865
* data transport path for QCA controllers, so set below field as NULL.
1866
*/
1867
hdev->get_codec_config_data = NULL;
1868
return 0;
1869
}
1870
1871
/* hci_uart setup callback: power the SoC on, negotiate UART speeds,
 * read the firmware version, download patch/NVM, and install the
 * error/reset/wakeup hooks.  Power-on is retried up to
 * MAX_INIT_RETRIES times on failure.
 */
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	unsigned int retries = 0;
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	const char *firmware_name = qca_get_firmware_name(hu);
	const char *rampatch_name = qca_get_rampatch_name(hu);
	int ret;
	struct qca_btsoc_version ver;
	struct qca_serdev *qcadev;
	const char *soc_name;

	ret = qca_check_speeds(hu);
	if (ret)
		return ret;

	clear_bit(QCA_ROM_FW, &qca->flags);
	/* Patch downloading has to be done without IBS mode */
	set_bit(QCA_IBS_DISABLED, &qca->flags);

	/* Enable controller to do both LE scan and BR/EDR inquiry
	 * simultaneously.
	 */
	hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);

	switch (soc_type) {
	case QCA_QCA2066:
		soc_name = "qca2066";
		break;

	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		soc_name = "wcn399x";
		break;

	case QCA_WCN6750:
		soc_name = "wcn6750";
		break;

	case QCA_WCN6855:
		soc_name = "wcn6855";
		break;

	case QCA_WCN7850:
		soc_name = "wcn7850";
		break;

	default:
		soc_name = "ROME/QCA6390";
	}
	bt_dev_info(hdev, "setting up %s", soc_name);

	qca->memdump_state = QCA_MEMDUMP_IDLE;

retry:
	ret = qca_power_on(hdev);
	if (ret)
		goto out;

	clear_bit(QCA_SSR_TRIGGERED, &qca->flags);

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		/* These parts boot at the init speed already; read the
		 * version before any speed change.
		 */
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->bdaddr_property_broken)
			hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);

		hci_set_aosp_capable(hdev);

		ret = qca_read_soc_version(hdev, &ver, soc_type);
		if (ret)
			goto out;
		break;

	default:
		qca_set_speed(hu, QCA_INIT_SPEED);
	}

	/* Setup user speed if needed */
	speed = qca_get_speed(hu, QCA_OPER_SPEED);
	if (speed) {
		ret = qca_set_speed(hu, QCA_OPER_SPEED);
		if (ret)
			goto out;

		qca_baudrate = qca_get_baudrate_value(speed);
	}

	switch (soc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
		break;

	default:
		/* Get QCA version information */
		ret = qca_read_soc_version(hdev, &ver, soc_type);
		if (ret)
			goto out;
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
			     firmware_name, rampatch_name);
	if (!ret) {
		/* Firmware is up: IBS may now be used and the error
		 * handling hooks become meaningful.
		 */
		clear_bit(QCA_IBS_DISABLED, &qca->flags);
		qca_debugfs_init(hdev);
		hu->hdev->hw_error = qca_hw_error;
		hu->hdev->reset = qca_reset;
		if (hu->serdev) {
			if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
				hu->hdev->wakeup = qca_wakeup;
		}
	} else if (ret == -ENOENT) {
		/* No patch/nvm-config found, run with original fw/config */
		set_bit(QCA_ROM_FW, &qca->flags);
		ret = 0;
	} else if (ret == -EAGAIN) {
		/*
		 * Userspace firmware loader will return -EAGAIN in case no
		 * patch/nvm-config is found, so run with original fw/config.
		 */
		set_bit(QCA_ROM_FW, &qca->flags);
		ret = 0;
	}

out:
	if (ret && retries < MAX_INIT_RETRIES) {
		bt_dev_warn(hdev, "Retry BT power ON:%d", retries);
		qca_power_shutdown(hu);
		if (hu->serdev) {
			serdev_device_close(hu->serdev);
			ret = serdev_device_open(hu->serdev);
			if (ret) {
				bt_dev_err(hdev, "failed to open port");
				return ret;
			}
		}
		retries++;
		goto retry;
	}

	/* Setup bdaddr */
	if (soc_type == QCA_ROME)
		hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
	else
		hu->hdev->set_bdaddr = qca_set_bdaddr;

	if (soc_type == QCA_QCA2066)
		qca_configure_hfp_offload(hdev);

	qca->fw_version = le16_to_cpu(ver.patch_ver);
	qca->controller_id = le16_to_cpu(ver.rom_ver);
	hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL);

	return ret;
}
/* hci_uart protocol operations for the QCA H4+IBS protocol. */
static const struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};
/* Per-SoC device data.  The numeric field of each qca_vreg entry is the
 * expected load in uA, passed to regulator_set_load() by
 * qca_init_regulators().
 */
static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = {
	.soc_type = QCA_WCN3950,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000  },
		{ "vddxo", 60000  },
		{ "vddrf", 155000 },
		{ "vddch0", 585000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
	.soc_type = QCA_WCN3988,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000  },
		{ "vddxo", 80000  },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = {
	.soc_type = QCA_WCN3990,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000  },
		{ "vddxo", 80000  },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = {
	.soc_type = QCA_WCN3991,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000  },
		{ "vddxo", 80000  },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};

static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
	.soc_type = QCA_WCN3998,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 10000  },
		{ "vddxo", 80000  },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_device_data qca_soc_data_qca2066 __maybe_unused = {
	.soc_type = QCA_QCA2066,
	.num_vregs = 0,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};

static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
	.soc_type = QCA_QCA6390,
	.num_vregs = 0,
};

static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = {
	.soc_type = QCA_WCN6750,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 5000 },
		{ "vddaon", 26000 },
		{ "vddbtcxmx", 126000 },
		{ "vddrfacmn", 12500 },
		{ "vddrfa0p8", 102000 },
		{ "vddrfa1p7", 302000 },
		{ "vddrfa1p2", 257000 },
		{ "vddrfa2p2", 1700000 },
		{ "vddasd", 200 },
	},
	.num_vregs = 9,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};

static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = {
	.soc_type = QCA_WCN6855,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 5000 },
		{ "vddbtcxmx", 126000 },
		{ "vddrfacmn", 12500 },
		{ "vddrfa0p8", 102000 },
		{ "vddrfa1p7", 302000 },
		{ "vddrfa1p2", 257000 },
	},
	.num_vregs = 6,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};

static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = {
	.soc_type = QCA_WCN7850,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 5000 },
		{ "vddaon", 26000 },
		{ "vdddig", 126000 },
		{ "vddrfa0p8", 102000 },
		{ "vddrfa1p2", 257000 },
		{ "vddrfa1p9", 302000 },
	},
	.num_vregs = 6,
	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
static void qca_power_shutdown(struct hci_uart *hu)
2174
{
2175
struct qca_serdev *qcadev;
2176
struct qca_data *qca = hu->priv;
2177
unsigned long flags;
2178
enum qca_btsoc_type soc_type = qca_soc_type(hu);
2179
bool sw_ctrl_state;
2180
struct qca_power *power;
2181
2182
/* From this point we go into power off state. But serial port is
2183
* still open, stop queueing the IBS data and flush all the buffered
2184
* data in skb's.
2185
*/
2186
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
2187
set_bit(QCA_IBS_DISABLED, &qca->flags);
2188
qca_flush(hu);
2189
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2190
2191
/* Non-serdev device usually is powered by external power
2192
* and don't need additional action in driver for power down
2193
*/
2194
if (!hu->serdev)
2195
return;
2196
2197
qcadev = serdev_device_get_drvdata(hu->serdev);
2198
power = qcadev->bt_power;
2199
2200
if (power && power->pwrseq) {
2201
pwrseq_power_off(power->pwrseq);
2202
set_bit(QCA_BT_OFF, &qca->flags);
2203
return;
2204
}
2205
2206
switch (soc_type) {
2207
case QCA_WCN3988:
2208
case QCA_WCN3990:
2209
case QCA_WCN3991:
2210
case QCA_WCN3998:
2211
host_set_baudrate(hu, 2400);
2212
qca_send_power_pulse(hu, false);
2213
qca_regulator_disable(qcadev);
2214
break;
2215
2216
case QCA_WCN6750:
2217
case QCA_WCN6855:
2218
gpiod_set_value_cansleep(qcadev->bt_en, 0);
2219
msleep(100);
2220
qca_regulator_disable(qcadev);
2221
if (qcadev->sw_ctrl) {
2222
sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
2223
bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
2224
}
2225
break;
2226
2227
default:
2228
gpiod_set_value_cansleep(qcadev->bt_en, 0);
2229
}
2230
2231
set_bit(QCA_BT_OFF, &qca->flags);
2232
}
2233
2234
static int qca_power_off(struct hci_dev *hdev)
2235
{
2236
struct hci_uart *hu = hci_get_drvdata(hdev);
2237
struct qca_data *qca = hu->priv;
2238
enum qca_btsoc_type soc_type = qca_soc_type(hu);
2239
2240
hu->hdev->hw_error = NULL;
2241
hu->hdev->reset = NULL;
2242
2243
timer_delete_sync(&qca->wake_retrans_timer);
2244
timer_delete_sync(&qca->tx_idle_timer);
2245
2246
/* Stop sending shutdown command if soc crashes. */
2247
if (soc_type != QCA_ROME
2248
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
2249
qca_send_pre_shutdown_cmd(hdev);
2250
usleep_range(8000, 10000);
2251
}
2252
2253
qca_power_shutdown(hu);
2254
return 0;
2255
}
2256
2257
static int qca_regulator_enable(struct qca_serdev *qcadev)
2258
{
2259
struct qca_power *power = qcadev->bt_power;
2260
int ret;
2261
2262
if (power->pwrseq)
2263
return pwrseq_power_on(power->pwrseq);
2264
2265
/* Already enabled */
2266
if (power->vregs_on)
2267
return 0;
2268
2269
BT_DBG("enabling %d regulators)", power->num_vregs);
2270
2271
ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
2272
if (ret)
2273
return ret;
2274
2275
power->vregs_on = true;
2276
2277
ret = clk_prepare_enable(qcadev->susclk);
2278
if (ret)
2279
qca_regulator_disable(qcadev);
2280
2281
return ret;
2282
}
2283
2284
static void qca_regulator_disable(struct qca_serdev *qcadev)
2285
{
2286
struct qca_power *power;
2287
2288
if (!qcadev)
2289
return;
2290
2291
power = qcadev->bt_power;
2292
2293
/* Already disabled? */
2294
if (!power->vregs_on)
2295
return;
2296
2297
regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
2298
power->vregs_on = false;
2299
2300
clk_disable_unprepare(qcadev->susclk);
2301
}
2302
2303
/* Resolve the named supplies for this SoC and record their expected
 * loads.  All allocations are devm-managed; returns 0 or a negative
 * errno.
 */
static int qca_init_regulators(struct qca_power *qca,
			       const struct qca_vreg *vregs, size_t num_vregs)
{
	struct regulator_bulk_data *supplies;
	size_t idx;
	int err;

	supplies = devm_kcalloc(qca->dev, num_vregs, sizeof(*supplies),
				GFP_KERNEL);
	if (!supplies)
		return -ENOMEM;

	for (idx = 0; idx < num_vregs; idx++)
		supplies[idx].supply = vregs[idx].name;

	err = devm_regulator_bulk_get(qca->dev, num_vregs, supplies);
	if (err < 0)
		return err;

	/* Declare the expected current draw for each supply. */
	for (idx = 0; idx < num_vregs; idx++) {
		err = regulator_set_load(supplies[idx].consumer,
					 vregs[idx].load_uA);
		if (err)
			return err;
	}

	qca->vreg_bulk = supplies;
	qca->num_vregs = num_vregs;

	return 0;
}
/* Probe a QCA Bluetooth serdev node.
 *
 * Reads firmware-name/max-speed/bd-address properties, identifies the SoC
 * from the match data (defaulting to QCA_ROME when none), sets up the
 * SoC-specific power infrastructure (power sequencer, regulators, BT_EN
 * GPIO, SW_CTRL GPIO, sleep clock) and finally registers the HCI UART
 * device. When no power control is available (no BT_EN GPIO on certain
 * SoCs), the NON_PERSISTENT_SETUP quirk and the qca_power_off shutdown hook
 * are not installed.
 *
 * Returns 0 on success or a negative errno.
 */
static int qca_serdev_probe(struct serdev_device *serdev)
{
	struct qca_serdev *qcadev;
	struct hci_dev *hdev;
	const struct qca_device_data *data;
	int err;
	bool power_ctrl_enabled = true;

	qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
	if (!qcadev)
		return -ENOMEM;

	qcadev->serdev_hu.serdev = serdev;
	data = device_get_match_data(&serdev->dev);
	serdev_device_set_drvdata(serdev, qcadev);
	/* Optional properties: missing entries simply leave the defaults. */
	device_property_read_string_array(&serdev->dev, "firmware-name",
					  qcadev->firmware_name,
					  ARRAY_SIZE(qcadev->firmware_name));
	device_property_read_u32(&serdev->dev, "max-speed",
				 &qcadev->oper_speed);
	if (!qcadev->oper_speed)
		BT_DBG("UART will pick default operating speed");

	qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
			"qcom,local-bd-address-broken");

	/* Entries without match data (qca6174/qca9377) are treated as ROME. */
	if (data)
		qcadev->btsoc_type = data->soc_type;
	else
		qcadev->btsoc_type = QCA_ROME;

	/* First pass: allocate the power-control state for SoCs that need it. */
	switch (qcadev->btsoc_type) {
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
	case QCA_WCN6750:
	case QCA_WCN6855:
	case QCA_WCN7850:
	case QCA_QCA6390:
		qcadev->bt_power = devm_kzalloc(&serdev->dev,
						sizeof(struct qca_power),
						GFP_KERNEL);
		if (!qcadev->bt_power)
			return -ENOMEM;
		break;
	default:
		break;
	}

	/* Second pass: acquire the actual power resources per SoC family. */
	switch (qcadev->btsoc_type) {
	case QCA_WCN6855:
	case QCA_WCN7850:
	case QCA_WCN6750:
		if (!device_property_present(&serdev->dev, "enable-gpios")) {
			/*
			 * Backward compatibility with old DT sources. If the
			 * node doesn't have the 'enable-gpios' property then
			 * let's use the power sequencer. Otherwise, let's
			 * drive everything ourselves.
			 */
			qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
								   "bluetooth");

			/*
			 * Some modules have BT_EN enabled via a hardware pull-up,
			 * meaning it is not defined in the DTS and is not controlled
			 * through the power sequence. In such cases, fall through
			 * to follow the legacy flow.
			 */
			if (IS_ERR(qcadev->bt_power->pwrseq))
				qcadev->bt_power->pwrseq = NULL;
			else
				break;
		}
		fallthrough;
	case QCA_WCN3950:
	case QCA_WCN3988:
	case QCA_WCN3990:
	case QCA_WCN3991:
	case QCA_WCN3998:
		/* Legacy flow: regulators + BT_EN/SW_CTRL GPIOs + susclk. */
		qcadev->bt_power->dev = &serdev->dev;
		err = qca_init_regulators(qcadev->bt_power, data->vregs,
					  data->num_vregs);
		if (err) {
			BT_ERR("Failed to init regulators:%d", err);
			return err;
		}

		qcadev->bt_power->vregs_on = false;

		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
							GPIOD_OUT_LOW);
		if (IS_ERR(qcadev->bt_en))
			return dev_err_probe(&serdev->dev,
					     PTR_ERR(qcadev->bt_en),
					     "failed to acquire BT_EN gpio\n");

		/* Without BT_EN these SoCs cannot be power-cycled by us. */
		if (!qcadev->bt_en &&
		    (data->soc_type == QCA_WCN6750 ||
		     data->soc_type == QCA_WCN6855))
			power_ctrl_enabled = false;

		qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
							  GPIOD_IN);
		if (IS_ERR(qcadev->sw_ctrl) &&
		    (data->soc_type == QCA_WCN6750 ||
		     data->soc_type == QCA_WCN6855 ||
		     data->soc_type == QCA_WCN7850)) {
			dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
			return PTR_ERR(qcadev->sw_ctrl);
		}

		qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
		if (IS_ERR(qcadev->susclk)) {
			dev_err(&serdev->dev, "failed to acquire clk\n");
			return PTR_ERR(qcadev->susclk);
		}
		break;

	case QCA_QCA6390:
		/* DT systems use a power sequencer; ACPI falls through to the
		 * generic GPIO/clock handling below.
		 */
		if (dev_of_node(&serdev->dev)) {
			qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
								   "bluetooth");
			if (IS_ERR(qcadev->bt_power->pwrseq))
				return PTR_ERR(qcadev->bt_power->pwrseq);
			break;
		}
		fallthrough;

	default:
		/* ROME and similar: optional BT_EN GPIO plus a 32 kHz susclk
		 * enabled for the lifetime of the device.
		 */
		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
							GPIOD_OUT_LOW);
		if (IS_ERR(qcadev->bt_en)) {
			dev_err(&serdev->dev, "failed to acquire enable gpio\n");
			return PTR_ERR(qcadev->bt_en);
		}

		if (!qcadev->bt_en)
			power_ctrl_enabled = false;

		qcadev->susclk = devm_clk_get_optional_enabled_with_rate(
					&serdev->dev, NULL, SUSCLK_RATE_32KHZ);
		if (IS_ERR(qcadev->susclk)) {
			dev_warn(&serdev->dev, "failed to acquire clk\n");
			return PTR_ERR(qcadev->susclk);
		}
	}

	err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
	if (err) {
		BT_ERR("serdev registration failed");
		return err;
	}

	hdev = qcadev->serdev_hu.hdev;

	if (power_ctrl_enabled) {
		hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
		hdev->shutdown = qca_power_off;
	}

	if (data) {
		/* Wideband speech support must be set per driver since it can't
		 * be queried via hci. Same with the valid le states quirk.
		 */
		if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
			hci_set_quirk(hdev,
				      HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);

		if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
			hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
	}

	return 0;
}
2509
2510
static void qca_serdev_remove(struct serdev_device *serdev)
2511
{
2512
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2513
struct qca_power *power = qcadev->bt_power;
2514
2515
switch (qcadev->btsoc_type) {
2516
case QCA_WCN3988:
2517
case QCA_WCN3990:
2518
case QCA_WCN3991:
2519
case QCA_WCN3998:
2520
case QCA_WCN6750:
2521
case QCA_WCN6855:
2522
case QCA_WCN7850:
2523
if (power->vregs_on)
2524
qca_power_shutdown(&qcadev->serdev_hu);
2525
break;
2526
default:
2527
break;
2528
}
2529
2530
hci_uart_unregister_device(&qcadev->serdev_hu);
2531
}
2532
2533
/* Driver-model shutdown hook.
 *
 * For QCA6390 only: wake the SoC with an IBS wake byte and then send an EDL
 * reset VSC over the raw serdev so the controller is back in its initial
 * state for the next boot's hdev->setup(). Other SoC types need no action
 * here.
 */
static void qca_serdev_shutdown(struct device *dev)
{
	int ret;
	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
	struct serdev_device *serdev = to_serdev_device(dev);
	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
	struct hci_uart *hu = &qcadev->serdev_hu;
	struct hci_dev *hdev = hu->hdev;
	/* Raw HCI_IBS_WAKE_IND byte followed by the EDL reset-SoC command. */
	const u8 ibs_wake_cmd[] = { 0xFD };
	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };

	if (qcadev->btsoc_type == QCA_QCA6390) {
		/* The purpose of sending the VSC is to reset SOC into a initial
		 * state and the state will ensure next hdev->setup() success.
		 * if HCI_QUIRK_NON_PERSISTENT_SETUP is set, it means that
		 * hdev->setup() can do its job regardless of SoC state, so
		 * don't need to send the VSC.
		 * if HCI_SETUP is set, it means that hdev->setup() was never
		 * invoked and the SOC is already in the initial state, so
		 * don't also need to send the VSC.
		 */
		if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_SETUP))
			return;

		/* The serdev must be in open state when control logic arrives
		 * here, so also fix the use-after-free issue caused by that
		 * the serdev is flushed or wrote after it is closed.
		 */
		serdev_device_write_flush(serdev);
		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
					      sizeof(ibs_wake_cmd));
		if (ret < 0) {
			BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
			return;
		}
		serdev_device_wait_until_sent(serdev, timeout);
		/* Give the SoC time to wake before sending the reset. */
		usleep_range(8000, 10000);

		serdev_device_write_flush(serdev);
		ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
					      sizeof(edl_reset_soc_cmd));
		if (ret < 0) {
			BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
			return;
		}
		serdev_device_wait_until_sent(serdev, timeout);
		/* Allow the reset command to take effect. */
		usleep_range(8000, 10000);
	}
}
2583
2584
/* System-suspend handler.
 *
 * Marks the driver as suspending, waits out any in-progress firmware
 * download or SSR (memory dump) phase, then drives the IBS TX state machine
 * to ASLEEP by sending HCI_IBS_SLEEP_IND, and finally waits for the remote
 * RX side to go to sleep so a late packet doesn't wake the system.
 *
 * Returns 0 on success or a negative errno; on error the QCA_SUSPENDING
 * flag is cleared again.
 */
static int __maybe_unused qca_suspend(struct device *dev)
{
	struct serdev_device *serdev = to_serdev_device(dev);
	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
	struct hci_uart *hu = &qcadev->serdev_hu;
	struct qca_data *qca = hu->priv;
	unsigned long flags;
	bool tx_pending = false;
	int ret = 0;
	u8 cmd;
	u32 wait_timeout = 0;

	set_bit(QCA_SUSPENDING, &qca->flags);

	/* if BT SoC is running with default firmware then it does not
	 * support in-band sleep
	 */
	if (test_bit(QCA_ROM_FW, &qca->flags))
		return 0;

	/* During SSR after memory dump collection, controller will be
	 * powered off and then powered on.If controller is powered off
	 * during SSR then we should wait until SSR is completed.
	 */
	if (test_bit(QCA_BT_OFF, &qca->flags) &&
	    !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
		return 0;

	if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
	    test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
		/* SSR needs the longer (dump + re-download) budget. */
		wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
					IBS_DISABLE_SSR_TIMEOUT_MS :
					FW_DOWNLOAD_TIMEOUT_MS;

		/* QCA_IBS_DISABLED flag is set to true, During FW download
		 * and during memory dump collection. It is reset to false,
		 * After FW download complete.
		 */
		wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
				    TASK_UNINTERRUPTIBLE,
				    msecs_to_jiffies(wait_timeout));

		if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
			bt_dev_err(hu->hdev, "SSR or FW download time out");
			ret = -ETIMEDOUT;
			goto error;
		}
	}

	/* Make sure no wake work is running while we change TX state. */
	cancel_work_sync(&qca->ws_awake_device);
	cancel_work_sync(&qca->ws_awake_rx);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* A wake retransmit is pending; stop it before sleeping. */
		timer_delete(&qca->wake_retrans_timer);
		fallthrough;
	case HCI_IBS_TX_AWAKE:
		timer_delete(&qca->tx_idle_timer);

		serdev_device_write_flush(hu->serdev);
		cmd = HCI_IBS_SLEEP_IND;
		ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));

		if (ret < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}

		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		tx_pending = true;	/* flush + clock vote after unlock */
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Already asleep, nothing to send. */
		break;

	default:
		BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
		ret = -EINVAL;
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (ret < 0)
		goto error;

	if (tx_pending) {
		serdev_device_wait_until_sent(hu->serdev,
			msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
		serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
	}

	/* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going
	 * to sleep, so that the packet does not wake the system later.
	 */
	/* NOTE(review): a negative return here (interrupted wait) falls
	 * through to "return 0" — presumably intentional best-effort; only
	 * the timeout case is treated as an error.
	 */
	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
	if (ret == 0) {
		ret = -ETIMEDOUT;
		goto error;
	}

	return 0;

error:
	clear_bit(QCA_SUSPENDING, &qca->flags);

	return ret;
}
2697
2698
/* System-resume handler: drop the QCA_SUSPENDING flag so normal IBS
 * operation can continue. Always succeeds.
 */
static int __maybe_unused qca_resume(struct device *dev)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;

	qcadev = serdev_device_get_drvdata(to_serdev_device(dev));
	qca = qcadev->serdev_hu.priv;

	clear_bit(QCA_SUSPENDING, &qca->flags);

	return 0;
}
2709
2710
/* System sleep PM ops: qca_suspend()/qca_resume() toggle QCA_SUSPENDING. */
static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
2711
2712
#ifdef CONFIG_OF
2713
static const struct of_device_id qca_bluetooth_of_match[] = {
2714
{ .compatible = "qcom,qca2066-bt", .data = &qca_soc_data_qca2066},
2715
{ .compatible = "qcom,qca6174-bt" },
2716
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
2717
{ .compatible = "qcom,qca9377-bt" },
2718
{ .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950},
2719
{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
2720
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
2721
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
2722
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
2723
{ .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750},
2724
{ .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855},
2725
{ .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850},
2726
{ /* sentinel */ }
2727
};
2728
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
2729
#endif
2730
2731
#ifdef CONFIG_ACPI
2732
static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
2733
{ "QCOM2066", (kernel_ulong_t)&qca_soc_data_qca2066 },
2734
{ "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2735
{ "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2736
{ "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2737
{ "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2738
{ },
2739
};
2740
MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
2741
#endif
2742
2743
#ifdef CONFIG_DEV_COREDUMP
/* Device-coredump trigger: forward the request to the HCI core's dump
 * callback, if one was installed on this hdev.
 */
static void hciqca_coredump(struct device *dev)
{
	struct qca_serdev *qcadev;
	struct hci_dev *hdev;

	qcadev = serdev_device_get_drvdata(to_serdev_device(dev));
	hdev = qcadev->serdev_hu.hdev;

	if (hdev->dump.coredump)
		hdev->dump.coredump(hdev);
}
#endif
2755
2756
/* serdev driver glue: probe/remove/shutdown plus PM and coredump hooks. */
static struct serdev_device_driver qca_serdev_driver = {
	.probe = qca_serdev_probe,
	.remove = qca_serdev_remove,
	.driver = {
		.name = "hci_uart_qca",
		.of_match_table = of_match_ptr(qca_bluetooth_of_match),
		.acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
		.shutdown = qca_serdev_shutdown,
		.pm = &qca_pm_ops,
#ifdef CONFIG_DEV_COREDUMP
		.coredump = hciqca_coredump,
#endif
	},
};
2770
2771
/* Module init: register the serdev driver and the QCA HCI UART protocol.
 *
 * Fix: the return value of serdev_device_driver_register() was previously
 * ignored, and a failing hci_uart_register_proto() left the serdev driver
 * registered. Now both failures are reported and the first registration is
 * unwound if the second fails.
 *
 * Returns 0 on success or a negative errno.
 */
int __init qca_init(void)
{
	int err;

	err = serdev_device_driver_register(&qca_serdev_driver);
	if (err)
		return err;

	err = hci_uart_register_proto(&qca_proto);
	if (err)
		serdev_device_driver_unregister(&qca_serdev_driver);

	return err;
}
2777
2778
/* Module exit: drop the serdev driver, then unregister the HCI UART
 * protocol and propagate that result.
 */
int __exit qca_deinit(void)
{
	int err;

	serdev_device_driver_unregister(&qca_serdev_driver);
	err = hci_uart_unregister_proto(&qca_proto);

	return err;
}
2784
2785