Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/bluetooth/hci_intel.c
49527 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
*
4
* Bluetooth HCI UART driver for Intel devices
5
*
6
* Copyright (C) 2015 Intel Corporation
7
*/
8
9
#include <linux/kernel.h>
10
#include <linux/errno.h>
11
#include <linux/skbuff.h>
12
#include <linux/firmware.h>
13
#include <linux/module.h>
14
#include <linux/wait.h>
15
#include <linux/tty.h>
16
#include <linux/platform_device.h>
17
#include <linux/gpio/consumer.h>
18
#include <linux/acpi.h>
19
#include <linux/interrupt.h>
20
#include <linux/pm_runtime.h>
21
22
#include <net/bluetooth/bluetooth.h>
23
#include <net/bluetooth/hci_core.h>
24
25
#include "hci_uart.h"
26
#include "btintel.h"
27
28
/* Bit numbers in intel_data->flags tracking the firmware-download and
 * low-power-mode (LPM) state machine of the controller.
 */
#define STATE_BOOTLOADER 0
#define STATE_DOWNLOADING 1
#define STATE_FIRMWARE_LOADED 2
#define STATE_FIRMWARE_FAILED 3
#define STATE_BOOTING 4
#define STATE_LPM_ENABLED 5
#define STATE_TX_ACTIVE 6
#define STATE_SUSPENDED 7
#define STATE_LPM_TRANSACTION 8

/* Vendor-specific H:4 packet types used by the Intel LPM protocol */
#define HCI_LPM_WAKE_PKT 0xf0
#define HCI_LPM_PKT 0xf1
#define HCI_LPM_MAX_SIZE 10
#define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE

/* Opcodes carried in struct hci_lpm_pkt */
#define LPM_OP_TX_NOTIFY 0x00
#define LPM_OP_SUSPEND_ACK 0x02
#define LPM_OP_RESUME_ACK 0x03

/* Runtime-PM autosuspend delay once the link goes idle */
#define LPM_SUSPEND_DELAY_MS 1000

/* On-the-wire layout of an LPM packet: opcode, payload length, payload */
struct hci_lpm_pkt {
	__u8 opcode;
	__u8 dlen;
	__u8 data[];
} __packed;

/* One instance per ACPI-enumerated Intel Bluetooth UART device */
struct intel_device {
	struct list_head list;		/* link in intel_device_list */
	struct platform_device *pdev;
	struct gpio_desc *reset;	/* power-on/reset GPIO */
	struct hci_uart *hu;		/* attached hci_uart, NULL when powered off */
	struct mutex hu_lock;		/* protects @hu against concurrent removal */
	int irq;			/* host-wake interrupt, negative if absent */
};

static LIST_HEAD(intel_device_list);
static DEFINE_MUTEX(intel_device_list_lock);

/* Per-hci_uart protocol state */
struct intel_data {
	struct sk_buff *rx_skb;		/* frame-reassembly buffer */
	struct sk_buff_head txq;	/* packets queued for transmission */
	struct work_struct busy_work;	/* restarts autosuspend while TX is busy */
	struct hci_uart *hu;
	unsigned long flags;		/* STATE_* bits above */
};
74
75
static u8 intel_convert_speed(unsigned int speed)
76
{
77
switch (speed) {
78
case 9600:
79
return 0x00;
80
case 19200:
81
return 0x01;
82
case 38400:
83
return 0x02;
84
case 57600:
85
return 0x03;
86
case 115200:
87
return 0x04;
88
case 230400:
89
return 0x05;
90
case 460800:
91
return 0x06;
92
case 921600:
93
return 0x07;
94
case 1843200:
95
return 0x08;
96
case 3250000:
97
return 0x09;
98
case 2000000:
99
return 0x0a;
100
case 3000000:
101
return 0x0b;
102
default:
103
return 0xff;
104
}
105
}
106
107
static int intel_wait_booting(struct hci_uart *hu)
108
{
109
struct intel_data *intel = hu->priv;
110
int err;
111
112
err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
113
TASK_INTERRUPTIBLE,
114
msecs_to_jiffies(1000));
115
116
if (err == -EINTR) {
117
bt_dev_err(hu->hdev, "Device boot interrupted");
118
return -EINTR;
119
}
120
121
if (err) {
122
bt_dev_err(hu->hdev, "Device boot timeout");
123
return -ETIMEDOUT;
124
}
125
126
return err;
127
}
128
129
#ifdef CONFIG_PM
130
static int intel_wait_lpm_transaction(struct hci_uart *hu)
131
{
132
struct intel_data *intel = hu->priv;
133
int err;
134
135
err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION,
136
TASK_INTERRUPTIBLE,
137
msecs_to_jiffies(1000));
138
139
if (err == -EINTR) {
140
bt_dev_err(hu->hdev, "LPM transaction interrupted");
141
return -EINTR;
142
}
143
144
if (err) {
145
bt_dev_err(hu->hdev, "LPM transaction timeout");
146
return -ETIMEDOUT;
147
}
148
149
return err;
150
}
151
152
/* Runtime-PM suspend path: send the vendor LPM suspend request and wait
 * for LPM_OP_SUSPEND_ACK (which sets STATE_SUSPENDED) before throttling
 * the UART. Returns 0 on success, -EAGAIN if TX is still active.
 */
static int intel_lpm_suspend(struct hci_uart *hu)
{
	/* Vendor LPM suspend request: opcode 0x01, dlen 0x01, payload 0x01 */
	static const u8 suspend[] = { 0x01, 0x01, 0x01 };
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	/* Nothing to do unless LPM is enabled and we are not already down. */
	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
	    test_bit(STATE_SUSPENDED, &intel->flags))
		return 0;

	/* Controller reported TX activity; let autosuspend retry later. */
	if (test_bit(STATE_TX_ACTIVE, &intel->flags))
		return -EAGAIN;

	bt_dev_dbg(hu->hdev, "Suspending");

	skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
		return -ENOMEM;
	}

	skb_put_data(skb, suspend, sizeof(suspend));
	hci_skb_pkt_type(skb) = HCI_LPM_PKT;

	set_bit(STATE_LPM_TRANSACTION, &intel->flags);

	/* LPM flow is a priority, enqueue packet at list head */
	skb_queue_head(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	intel_wait_lpm_transaction(hu);
	/* Even in case of failure, continue and test the suspended flag */

	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);

	/* STATE_SUSPENDED is set by the LPM_OP_SUSPEND_ACK handler. */
	if (!test_bit(STATE_SUSPENDED, &intel->flags)) {
		bt_dev_err(hu->hdev, "Device suspend error");
		return -EINVAL;
	}

	bt_dev_dbg(hu->hdev, "Suspended");

	/* Throttle the UART only after the controller acknowledged. */
	hci_uart_set_flow_control(hu, true);

	return 0;
}
198
199
/* Runtime-PM resume path: un-throttle the UART, send the LPM wake packet
 * and wait for LPM_OP_RESUME_ACK (which clears STATE_SUSPENDED).
 */
static int intel_lpm_resume(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	/* Only meaningful when LPM is enabled and we are actually down. */
	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
	    !test_bit(STATE_SUSPENDED, &intel->flags))
		return 0;

	bt_dev_dbg(hu->hdev, "Resuming");

	/* Flow control must be re-enabled before the wake packet can go out. */
	hci_uart_set_flow_control(hu, false);

	/* The wake packet has no payload; its type byte is the whole message. */
	skb = bt_skb_alloc(0, GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
		return -ENOMEM;
	}

	hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT;

	set_bit(STATE_LPM_TRANSACTION, &intel->flags);

	/* LPM flow is a priority, enqueue packet at list head */
	skb_queue_head(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	intel_wait_lpm_transaction(hu);
	/* Even in case of failure, continue and test the suspended flag */

	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);

	/* STATE_SUSPENDED is cleared by the LPM_OP_RESUME_ACK handler. */
	if (test_bit(STATE_SUSPENDED, &intel->flags)) {
		bt_dev_err(hu->hdev, "Device resume error");
		return -EINVAL;
	}

	bt_dev_dbg(hu->hdev, "Resumed");

	return 0;
}
240
#endif /* CONFIG_PM */
241
242
/* Controller-initiated wake (host-wake irq): un-throttle the UART, mark
 * the link resumed and acknowledge with an LPM resume-ack packet.
 * Called from the threaded irq handler with idev->hu_lock held.
 */
static int intel_lpm_host_wake(struct hci_uart *hu)
{
	static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 };
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	hci_uart_set_flow_control(hu, false);

	clear_bit(STATE_SUSPENDED, &intel->flags);

	skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
		return -ENOMEM;
	}

	skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack));
	hci_skb_pkt_type(skb) = HCI_LPM_PKT;

	/* LPM flow is a priority, enqueue packet at list head */
	skb_queue_head(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	bt_dev_dbg(hu->hdev, "Resumed by controller");

	return 0;
}
269
270
static irqreturn_t intel_irq(int irq, void *dev_id)
271
{
272
struct intel_device *idev = dev_id;
273
274
dev_info(&idev->pdev->dev, "hci_intel irq\n");
275
276
mutex_lock(&idev->hu_lock);
277
if (idev->hu)
278
intel_lpm_host_wake(idev->hu);
279
mutex_unlock(&idev->hu_lock);
280
281
/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
282
pm_runtime_get(&idev->pdev->dev);
283
pm_runtime_put_autosuspend(&idev->pdev->dev);
284
285
return IRQ_HANDLED;
286
}
287
288
/* Power the controller on or off via the matching platform device's reset
 * GPIO, and set up/tear down host-wake irq and runtime PM alongside.
 *
 * NOTE(review): err is only set to 0 through a successful
 * devm_request_threaded_irq(); a plain GPIO power toggle without wakeup
 * support still returns -ENODEV — intel_open() relies on the return value
 * to decide whether to set STATE_BOOTING. Confirm before changing.
 */
static int intel_set_power(struct hci_uart *hu, bool powered)
{
	struct intel_device *idev;
	int err = -ENODEV;

	if (!hu->tty->dev)
		return err;

	mutex_lock(&intel_device_list_lock);

	list_for_each_entry(idev, &intel_device_list, list) {
		/* tty device and pdev device should share the same parent
		 * which is the UART port.
		 */
		if (hu->tty->dev->parent != idev->pdev->dev.parent)
			continue;

		if (!idev->reset) {
			err = -ENOTSUPP;
			break;
		}

		BT_INFO("hu %p, Switching compatible pm device (%s) to %u",
			hu, dev_name(&idev->pdev->dev), powered);

		gpiod_set_value(idev->reset, powered);

		/* Provide to idev a hu reference which is used to run LPM
		 * transactions (lpm suspend/resume) from PM callbacks.
		 * hu needs to be protected against concurrent removing during
		 * these PM ops.
		 */
		mutex_lock(&idev->hu_lock);
		idev->hu = powered ? hu : NULL;
		mutex_unlock(&idev->hu_lock);

		/* No host-wake irq: nothing more to configure. */
		if (idev->irq < 0)
			break;

		if (powered && device_can_wakeup(&idev->pdev->dev)) {
			err = devm_request_threaded_irq(&idev->pdev->dev,
							idev->irq, NULL,
							intel_irq,
							IRQF_ONESHOT,
							"bt-host-wake", idev);
			if (err) {
				BT_ERR("hu %p, unable to allocate irq-%d",
				       hu, idev->irq);
				break;
			}

			device_wakeup_enable(&idev->pdev->dev);

			/* Device is powered and idle: arm delayed autosuspend. */
			pm_runtime_set_active(&idev->pdev->dev);
			pm_runtime_use_autosuspend(&idev->pdev->dev);
			pm_runtime_set_autosuspend_delay(&idev->pdev->dev,
							 LPM_SUSPEND_DELAY_MS);
			pm_runtime_enable(&idev->pdev->dev);
		} else if (!powered && device_may_wakeup(&idev->pdev->dev)) {
			devm_free_irq(&idev->pdev->dev, idev->irq, idev);
			device_wakeup_disable(&idev->pdev->dev);

			pm_runtime_disable(&idev->pdev->dev);
		}
	}

	mutex_unlock(&intel_device_list_lock);

	return err;
}
358
359
static void intel_busy_work(struct work_struct *work)
360
{
361
struct intel_data *intel = container_of(work, struct intel_data,
362
busy_work);
363
struct intel_device *idev;
364
365
if (!intel->hu->tty->dev)
366
return;
367
368
/* Link is busy, delay the suspend */
369
mutex_lock(&intel_device_list_lock);
370
list_for_each_entry(idev, &intel_device_list, list) {
371
if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
372
pm_runtime_get(&idev->pdev->dev);
373
pm_runtime_put_autosuspend(&idev->pdev->dev);
374
break;
375
}
376
}
377
mutex_unlock(&intel_device_list_lock);
378
}
379
380
/* Protocol open callback: allocate per-link state and power the
 * controller on. The Intel protocol requires hardware flow control.
 */
static int intel_open(struct hci_uart *hu)
{
	struct intel_data *intel;

	BT_DBG("hu %p", hu);

	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	intel = kzalloc(sizeof(*intel), GFP_KERNEL);
	if (!intel)
		return -ENOMEM;

	skb_queue_head_init(&intel->txq);
	INIT_WORK(&intel->busy_work, intel_busy_work);

	intel->hu = hu;

	hu->priv = intel;

	/* A successful power-on means the controller is booting; the bootup
	 * event will clear STATE_BOOTING again.
	 */
	if (!intel_set_power(hu, true))
		set_bit(STATE_BOOTING, &intel->flags);

	return 0;
}
405
406
/* Protocol close callback: stop deferred work, power the controller off
 * and release all per-link state.
 */
static int intel_close(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;

	BT_DBG("hu %p", hu);

	/* Flush busy_work before freeing the intel_data it references. */
	cancel_work_sync(&intel->busy_work);

	intel_set_power(hu, false);

	skb_queue_purge(&intel->txq);
	kfree_skb(intel->rx_skb);
	kfree(intel);

	hu->priv = NULL;
	return 0;
}
423
424
/* Protocol flush callback: drop all not-yet-transmitted packets. */
static int intel_flush(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;

	BT_DBG("hu %p", hu);

	skb_queue_purge(&intel->txq);

	return 0;
}
434
435
/* Fabricate a "Command Complete, status 0x00" event for @opcode and feed
 * it to the HCI core. Used because the Intel bootloader reset command
 * never produces a real completion event (see intel_dequeue()).
 */
static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	/* Event header + command-complete body + one status byte */
	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	/* Trailing status byte: success */
	skb_put_u8(skb, 0x00);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
459
460
/* Switch both controller and host UART to @speed. The controller side is
 * changed first (vendor command 0xfc06) with flow control throttled, then
 * the host tty follows after a settle delay.
 */
static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
	struct intel_data *intel = hu->priv;
	struct hci_dev *hdev = hu->hdev;
	u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 };
	struct sk_buff *skb;
	int err;

	/* This can be the first command sent to the chip, check
	 * that the controller is ready.
	 */
	err = intel_wait_booting(hu);

	clear_bit(STATE_BOOTING, &intel->flags);

	/* In case of timeout, try to continue anyway */
	if (err && err != -ETIMEDOUT)
		return err;

	bt_dev_info(hdev, "Change controller speed to %d", speed);

	/* Last byte of the command carries the vendor speed id. */
	speed_cmd[3] = intel_convert_speed(speed);
	if (speed_cmd[3] == 0xff) {
		bt_dev_err(hdev, "Unsupported speed");
		return -EINVAL;
	}

	/* Device will not accept speed change if Intel version has not been
	 * previously requested.
	 */
	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to alloc memory for baudrate packet");
		return -ENOMEM;
	}

	skb_put_data(skb, speed_cmd, sizeof(speed_cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	/* Throttle the link while the rates are out of sync. */
	hci_uart_set_flow_control(hu, true);

	skb_queue_tail(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* wait 100ms to change baudrate on controller side */
	msleep(100);

	hci_uart_set_baudrate(hu, speed);
	hci_uart_set_flow_control(hu, false);

	return 0;
}
520
521
/* Protocol setup callback: identify the controller, download the matching
 * .sfi firmware when it is in bootloader mode, reboot it into operational
 * firmware, enable LPM when the platform supports wakeup, load the DDC
 * configuration, and finally switch to the operational baud rate.
 */
static int intel_setup(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	struct hci_dev *hdev = hu->hdev;
	struct sk_buff *skb;
	struct intel_version ver;
	struct intel_boot_params params;
	struct intel_device *idev;
	const struct firmware *fw;
	char fwname[64];
	u32 boot_param;
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	unsigned int init_speed, oper_speed;
	int speed_change = 0;
	int err;

	bt_dev_dbg(hdev, "");

	hu->hdev->set_diag = btintel_set_diag;
	hu->hdev->set_bdaddr = btintel_set_bdaddr;

	/* Set the default boot parameter to 0x0 and it is updated to
	 * SKU specific boot parameter after reading Intel_Write_Boot_Params
	 * command while downloading the firmware.
	 */
	boot_param = 0x00000000;

	calltime = ktime_get();

	/* Per-line speeds override the protocol defaults. */
	if (hu->init_speed)
		init_speed = hu->init_speed;
	else
		init_speed = hu->proto->init_speed;

	if (hu->oper_speed)
		oper_speed = hu->oper_speed;
	else
		oper_speed = hu->proto->oper_speed;

	if (oper_speed && init_speed && oper_speed != init_speed)
		speed_change = 1;

	/* Check that the controller is ready */
	err = intel_wait_booting(hu);

	clear_bit(STATE_BOOTING, &intel->flags);

	/* In case of timeout, try to continue anyway */
	if (err && err != -ETIMEDOUT)
		return err;

	set_bit(STATE_BOOTLOADER, &intel->flags);

	/* Read the Intel version information to determine if the device
	 * is in bootloader mode or if it already has operational firmware
	 * loaded.
	 */
	err = btintel_read_version(hdev, &ver);
	if (err)
		return err;

	/* The hardware platform number has a fixed value of 0x37 and
	 * for now only accept this single value.
	 */
	if (ver.hw_platform != 0x37) {
		bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
			   ver.hw_platform);
		return -EINVAL;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come along.
	 */
	switch (ver.hw_variant) {
	case 0x0b: /* LnP */
	case 0x0c: /* WsP */
	case 0x12: /* ThP */
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver.hw_variant);
		return -EINVAL;
	}

	btintel_version_info(hdev, &ver);

	/* The firmware variant determines if the device is in bootloader
	 * mode or is running operational firmware. The value 0x06 identifies
	 * the bootloader and the value 0x23 identifies the operational
	 * firmware.
	 *
	 * When the operational firmware is already present, then only
	 * the check for valid Bluetooth device address is needed. This
	 * determines if the device will be added as configured or
	 * unconfigured controller.
	 *
	 * It is not possible to use the Secure Boot Parameters in this
	 * case since that command is only available in bootloader mode.
	 */
	if (ver.fw_variant == 0x23) {
		clear_bit(STATE_BOOTLOADER, &intel->flags);
		btintel_check_bdaddr(hdev);
		return 0;
	}

	/* If the device is not in bootloader mode, then the only possible
	 * choice is to return an error and abort the device initialization.
	 */
	if (ver.fw_variant != 0x06) {
		bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
			   ver.fw_variant);
		return -ENODEV;
	}

	/* Read the secure boot parameters to identify the operating
	 * details of the bootloader.
	 */
	err = btintel_read_boot_params(hdev, &params);
	if (err)
		return err;

	/* It is required that every single firmware fragment is acknowledged
	 * with a command complete event. If the boot parameters indicate
	 * that this bootloader does not send them, then abort the setup.
	 */
	if (params.limited_cce != 0x00) {
		bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
			   params.limited_cce);
		return -EINVAL;
	}

	/* If the OTP has no valid Bluetooth device address, then there will
	 * also be no valid address for the operational firmware.
	 */
	if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
		bt_dev_info(hdev, "No device address configured");
		hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
	}

	/* With this Intel bootloader only the hardware variant and device
	 * revision information are used to select the right firmware for SfP
	 * and WsP.
	 *
	 * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
	 *
	 * Currently the supported hardware variants are:
	 *   11 (0x0b) for iBT 3.0 (LnP/SfP)
	 *   12 (0x0c) for iBT 3.5 (WsP)
	 *
	 * For ThP/JfP and for future SKU's, the FW name varies based on HW
	 * variant, HW revision and FW revision, as these are dependent on CNVi
	 * and RF Combination.
	 *
	 *   18 (0x12) for iBT3.5 (ThP/JfP)
	 *
	 * The firmware file name for these will be
	 * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
	 *
	 */
	switch (ver.hw_variant) {
	case 0x0b: /* SfP */
	case 0x0c: /* WsP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
			 ver.hw_variant, le16_to_cpu(params.dev_revid));
		break;
	case 0x12: /* ThP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
			 ver.hw_variant, ver.hw_revision, ver.fw_revision);
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver.hw_variant);
		return -EINVAL;
	}

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
			   err);
		return err;
	}

	bt_dev_info(hdev, "Found device firmware: %s", fwname);

	/* Save the DDC file name for later */
	switch (ver.hw_variant) {
	case 0x0b: /* SfP */
	case 0x0c: /* WsP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
			 ver.hw_variant, le16_to_cpu(params.dev_revid));
		break;
	case 0x12: /* ThP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
			 ver.hw_variant, ver.hw_revision, ver.fw_revision);
		break;
	default:
		/* NOTE(review): unreachable in practice — the earlier switch
		 * already rejected unsupported variants. If it were reached,
		 * returning here would leak @fw; confirm before relying on it.
		 */
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver.hw_variant);
		return -EINVAL;
	}

	if (fw->size < 644) {
		bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
			   fw->size);
		err = -EBADF;
		goto done;
	}

	set_bit(STATE_DOWNLOADING, &intel->flags);

	/* Start firmware downloading and get boot parameter */
	err = btintel_download_firmware(hdev, &ver, fw, &boot_param);
	if (err < 0)
		goto done;

	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);

	bt_dev_info(hdev, "Waiting for firmware download to complete");

	/* Before switching the device into operational mode and with that
	 * booting the loaded firmware, wait for the bootloader notification
	 * that all fragments have been successfully received.
	 *
	 * When the event processing receives the notification, then the
	 * STATE_DOWNLOADING flag will be cleared.
	 *
	 * The firmware loading should not take longer than 5 seconds
	 * and thus just timeout if that happens and fail the setup
	 * of this device.
	 */
	err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
				  TASK_INTERRUPTIBLE,
				  msecs_to_jiffies(5000));
	if (err == -EINTR) {
		bt_dev_err(hdev, "Firmware loading interrupted");
		err = -EINTR;
		goto done;
	}

	if (err) {
		bt_dev_err(hdev, "Firmware loading timeout");
		err = -ETIMEDOUT;
		goto done;
	}

	if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
		bt_dev_err(hdev, "Firmware loading failed");
		err = -ENOEXEC;
		goto done;
	}

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);

done:
	release_firmware(fw);

	/* Check if there was an error and if is not -EALREADY which means the
	 * firmware has already been loaded.
	 */
	if (err < 0 && err != -EALREADY)
		return err;

	/* We need to restore the default speed before Intel reset */
	if (speed_change) {
		err = intel_set_baudrate(hu, init_speed);
		if (err)
			return err;
	}

	calltime = ktime_get();

	set_bit(STATE_BOOTING, &intel->flags);

	err = btintel_send_intel_reset(hdev, boot_param);
	if (err)
		return err;

	/* The bootloader will not indicate when the device is ready. This
	 * is done by the operational firmware sending bootup notification.
	 *
	 * Booting into operational firmware should not take longer than
	 * 1 second. However if that happens, then just fail the setup
	 * since something went wrong.
	 */
	bt_dev_info(hdev, "Waiting for device to boot");

	err = intel_wait_booting(hu);
	if (err)
		return err;

	clear_bit(STATE_BOOTING, &intel->flags);

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	bt_dev_info(hdev, "Device booted in %llu usecs", duration);

	/* Enable LPM if matching pdev with wakeup enabled, set TX active
	 * until further LPM TX notification.
	 */
	mutex_lock(&intel_device_list_lock);
	list_for_each_entry(idev, &intel_device_list, list) {
		if (!hu->tty->dev)
			break;
		if (hu->tty->dev->parent == idev->pdev->dev.parent) {
			if (device_may_wakeup(&idev->pdev->dev)) {
				set_bit(STATE_LPM_ENABLED, &intel->flags);
				set_bit(STATE_TX_ACTIVE, &intel->flags);
			}
			break;
		}
	}
	mutex_unlock(&intel_device_list_lock);

	/* Ignore errors, device can work without DDC parameters */
	btintel_load_ddc_config(hdev, fwname);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	kfree_skb(skb);

	if (speed_change) {
		err = intel_set_baudrate(hu, oper_speed);
		if (err)
			return err;
	}

	bt_dev_info(hdev, "Setup complete");

	clear_bit(STATE_BOOTLOADER, &intel->flags);

	return 0;
}
864
865
/* HCI event receive hook: while in bootloader/booting state, intercept
 * the vendor events that signal firmware-download completion (0xff/0x06)
 * and operational bootup (0xff/0x02) and wake the corresponding waiters,
 * then pass every event on to the HCI core.
 */
static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct intel_data *intel = hu->priv;
	struct hci_event_hdr *hdr;

	if (!test_bit(STATE_BOOTLOADER, &intel->flags) &&
	    !test_bit(STATE_BOOTING, &intel->flags))
		goto recv;

	hdr = (void *)skb->data;

	/* When the firmware loading completes the device sends
	 * out a vendor specific event indicating the result of
	 * the firmware loading.
	 */
	if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
	    skb->data[2] == 0x06) {
		/* data[3] is the status byte: non-zero means failure. */
		if (skb->data[3] != 0x00)
			set_bit(STATE_FIRMWARE_FAILED, &intel->flags);

		if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
		    test_bit(STATE_FIRMWARE_LOADED, &intel->flags))
			wake_up_bit(&intel->flags, STATE_DOWNLOADING);

	/* When switching to the operational firmware the device
	 * sends a vendor specific event indicating that the bootup
	 * completed.
	 */
	} else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
		   skb->data[2] == 0x02) {
		if (test_and_clear_bit(STATE_BOOTING, &intel->flags))
			wake_up_bit(&intel->flags, STATE_BOOTING);
	}
recv:
	return hci_recv_frame(hdev, skb);
}
902
903
static void intel_recv_lpm_notify(struct hci_dev *hdev, int value)
904
{
905
struct hci_uart *hu = hci_get_drvdata(hdev);
906
struct intel_data *intel = hu->priv;
907
908
bt_dev_dbg(hdev, "TX idle notification (%d)", value);
909
910
if (value) {
911
set_bit(STATE_TX_ACTIVE, &intel->flags);
912
schedule_work(&intel->busy_work);
913
} else {
914
clear_bit(STATE_TX_ACTIVE, &intel->flags);
915
}
916
}
917
918
static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
919
{
920
struct hci_lpm_pkt *lpm = (void *)skb->data;
921
struct hci_uart *hu = hci_get_drvdata(hdev);
922
struct intel_data *intel = hu->priv;
923
924
switch (lpm->opcode) {
925
case LPM_OP_TX_NOTIFY:
926
if (lpm->dlen < 1) {
927
bt_dev_err(hu->hdev, "Invalid LPM notification packet");
928
break;
929
}
930
intel_recv_lpm_notify(hdev, lpm->data[0]);
931
break;
932
case LPM_OP_SUSPEND_ACK:
933
set_bit(STATE_SUSPENDED, &intel->flags);
934
if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags))
935
wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
936
break;
937
case LPM_OP_RESUME_ACK:
938
clear_bit(STATE_SUSPENDED, &intel->flags);
939
if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags))
940
wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
941
break;
942
default:
943
bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
944
break;
945
}
946
947
kfree_skb(skb);
948
949
return 0;
950
}
951
952
/* Reassembly descriptor for LPM packets: the length byte sits at offset 1
 * (hci_lpm_pkt.dlen) and is one byte wide.
 */
#define INTEL_RECV_LPM \
	.type = HCI_LPM_PKT, \
	.hlen = HCI_LPM_HDR_SIZE, \
	.loff = 1, \
	.lsize = 1, \
	.maxlen = HCI_LPM_MAX_SIZE

/* Packet types handled by intel_recv(): ACL/SCO go straight to the core,
 * events and LPM packets get vendor-specific processing first.
 */
static const struct h4_recv_pkt intel_recv_pkts[] = {
	{ H4_RECV_ACL,    .recv = hci_recv_frame },
	{ H4_RECV_SCO,    .recv = hci_recv_frame },
	{ H4_RECV_EVENT,  .recv = intel_recv_event },
	{ INTEL_RECV_LPM, .recv = intel_recv_lpm },
};
965
966
/* Receive-path entry point: feed raw UART bytes into the H:4 reassembler.
 * Returns @count on success or a negative error on reassembly failure.
 */
static int intel_recv(struct hci_uart *hu, const void *data, int count)
{
	struct intel_data *intel = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
				    intel_recv_pkts,
				    ARRAY_SIZE(intel_recv_pkts));
	if (IS_ERR(intel->rx_skb)) {
		int err = PTR_ERR(intel->rx_skb);

		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		/* Drop the poisoned pointer so the next call starts fresh. */
		intel->rx_skb = NULL;
		return err;
	}

	return count;
}
986
987
/* Queue a packet for transmission, resuming the controller first if it
 * was runtime-suspended.
 */
static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct intel_data *intel = hu->priv;
	struct intel_device *idev;

	BT_DBG("hu %p skb %p", hu, skb);

	if (!hu->tty->dev)
		goto out_enqueue;

	/* Be sure our controller is resumed and potential LPM transaction
	 * completed before enqueuing any packet.
	 */
	mutex_lock(&intel_device_list_lock);
	list_for_each_entry(idev, &intel_device_list, list) {
		if (hu->tty->dev->parent == idev->pdev->dev.parent) {
			/* get_sync runs the runtime-resume callback
			 * synchronously if the device was suspended.
			 */
			pm_runtime_get_sync(&idev->pdev->dev);
			pm_runtime_put_autosuspend(&idev->pdev->dev);
			break;
		}
	}
	mutex_unlock(&intel_device_list_lock);
out_enqueue:
	skb_queue_tail(&intel->txq, skb);

	return 0;
}
1014
1015
/* Pop the next packet for transmission and prepend its H:4 type byte.
 * While in bootloader mode, the Intel reset command gets a synthetic
 * command-complete injected since the hardware never sends one.
 */
static struct sk_buff *intel_dequeue(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	skb = skb_dequeue(&intel->txq);
	if (!skb)
		return skb;

	if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
	    (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) {
		struct hci_command_hdr *cmd = (void *)skb->data;
		__u16 opcode = le16_to_cpu(cmd->opcode);

		/* When the BTINTEL_HCI_OP_RESET command is issued to boot into
		 * the operational firmware, it will actually not send a command
		 * complete event. To keep the flow control working inject that
		 * event here.
		 */
		if (opcode == BTINTEL_HCI_OP_RESET)
			inject_cmd_complete(hu->hdev, opcode);
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	return skb;
}
1043
1044
/* hci_uart protocol descriptor for Intel controllers */
static const struct hci_uart_proto intel_proto = {
	.id = HCI_UART_INTEL,
	.name = "Intel",
	.manufacturer = 2,		/* Bluetooth SIG company id: Intel */
	.init_speed = 115200,		/* bootloader default baud rate */
	.oper_speed = 3000000,		/* rate used after firmware boot */
	.open = intel_open,
	.close = intel_close,
	.flush = intel_flush,
	.setup = intel_setup,
	.set_baudrate = intel_set_baudrate,
	.recv = intel_recv,
	.enqueue = intel_enqueue,
	.dequeue = intel_dequeue,
};
1059
1060
#ifdef CONFIG_ACPI
1061
/* ACPI IDs of the platform devices exposing reset/host-wake resources */
static const struct acpi_device_id intel_acpi_match[] = {
	{ "INT33E1", 0 },
	{ "INT33E3", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
1067
#endif
1068
1069
#ifdef CONFIG_PM
1070
/* Runtime-PM suspend callback: run the vendor LPM suspend against the
 * currently attached hci_uart, if any.
 */
static int intel_suspend_device(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	/* hu_lock guards against the hu being detached concurrently. */
	mutex_lock(&idev->hu_lock);
	if (idev->hu)
		intel_lpm_suspend(idev->hu);
	mutex_unlock(&idev->hu_lock);

	return 0;
}
1081
1082
/* Runtime-PM resume callback: run the vendor LPM resume against the
 * currently attached hci_uart, if any.
 */
static int intel_resume_device(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	/* hu_lock guards against the hu being detached concurrently. */
	mutex_lock(&idev->hu_lock);
	if (idev->hu)
		intel_lpm_resume(idev->hu);
	mutex_unlock(&idev->hu_lock);

	return 0;
}
1093
#endif
1094
1095
#ifdef CONFIG_PM_SLEEP
1096
/* System-sleep suspend: arm the host-wake irq as a wakeup source, then
 * perform the same LPM suspend as runtime PM.
 */
static int intel_suspend(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(idev->irq);

	return intel_suspend_device(dev);
}
1105
1106
/* System-sleep resume: disarm the host-wake irq, then perform the same
 * LPM resume as runtime PM.
 */
static int intel_resume(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(idev->irq);

	return intel_resume_device(dev);
}
1115
#endif
1116
1117
/* System sleep additionally arms/disarms the wake irq; runtime PM drives
 * only the vendor LPM transactions.
 */
static const struct dev_pm_ops intel_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
	SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
};

/* ACPI GpioIo resources: index 0 is the reset line, index 1 host wake */
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false };

static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = {
	{ "reset-gpios", &reset_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ "host-wake-gpios", &host_wake_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ }
};
1130
1131
/* Platform probe: pick up the reset GPIO and host-wake irq (falling back
 * to a host-wake GPIO when no irq resource exists), then register the
 * instance on the global device list for intel_set_power() to match.
 */
static int intel_probe(struct platform_device *pdev)
{
	struct intel_device *idev;
	int ret;

	idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	mutex_init(&idev->hu_lock);

	idev->pdev = pdev;

	/* Mapping failure is non-fatal: names may already be provided. */
	ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, acpi_hci_intel_gpios);
	if (ret)
		dev_dbg(&pdev->dev, "Unable to add GPIO mapping table\n");

	idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(idev->reset)) {
		dev_err(&pdev->dev, "Unable to retrieve gpio\n");
		return PTR_ERR(idev->reset);
	}

	idev->irq = platform_get_irq(pdev, 0);
	if (idev->irq < 0) {
		struct gpio_desc *host_wake;

		dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");

		host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
		if (IS_ERR(host_wake)) {
			dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
			/* No wake source at all: register without wakeup. */
			goto no_irq;
		}

		idev->irq = gpiod_to_irq(host_wake);
		if (idev->irq < 0) {
			dev_err(&pdev->dev, "No corresponding irq for gpio\n");
			goto no_irq;
		}
	}

	/* Only enable wake-up/irq when controller is powered */
	device_set_wakeup_capable(&pdev->dev, true);
	device_wakeup_disable(&pdev->dev);

no_irq:
	platform_set_drvdata(pdev, idev);

	/* Place this instance on the device list */
	mutex_lock(&intel_device_list_lock);
	list_add_tail(&idev->list, &intel_device_list);
	mutex_unlock(&intel_device_list_lock);

	dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n",
		 desc_to_gpio(idev->reset), idev->irq);

	return 0;
}
1190
1191
/* Platform remove: disable wakeup and take the instance off the global
 * list so intel_set_power() and PM callbacks can no longer find it.
 */
static void intel_remove(struct platform_device *pdev)
{
	struct intel_device *idev = platform_get_drvdata(pdev);

	device_wakeup_disable(&pdev->dev);

	mutex_lock(&intel_device_list_lock);
	list_del(&idev->list);
	mutex_unlock(&intel_device_list_lock);

	dev_info(&pdev->dev, "unregistered.\n");
}
1203
1204
/* Platform driver binding the ACPI-enumerated Intel UART devices */
static struct platform_driver intel_driver = {
	.probe = intel_probe,
	.remove = intel_remove,
	.driver = {
		.name = "hci_intel",
		.acpi_match_table = ACPI_PTR(intel_acpi_match),
		.pm = &intel_pm_ops,
	},
};
1213
1214
/* Module init: register the platform driver, then the HCI UART protocol.
 * Returns 0 on success or a negative errno.
 */
int __init intel_init(void)
{
	int err;

	err = platform_driver_register(&intel_driver);
	if (err)
		return err;

	err = hci_uart_register_proto(&intel_proto);
	if (err) {
		/* Don't leave the platform driver registered when the
		 * protocol registration fails, or a later retry of module
		 * load would hit a duplicate-driver error.
		 */
		platform_driver_unregister(&intel_driver);
		return err;
	}

	return 0;
}
1224
1225
/* Module exit: unregister the platform driver and the HCI UART protocol. */
int __exit intel_deinit(void)
{
	platform_driver_unregister(&intel_driver);

	return hci_uart_unregister_proto(&intel_proto);
}
1231
1232