GitHub Repository: torvalds/linux
Path: blob/master/net/bluetooth/hci_sync.c
// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as a response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
				   const void *param, struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if the command needs to be associated with a sock
	 * (e.g. the mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}
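
/* Illustrative note (not part of the original file): hci_cmd_sync_alloc()
 * builds the standard HCI command packet layout, a 3-byte header followed by
 * plen parameter bytes, with the opcode stored little-endian:
 *
 *   byte 0-1: opcode (OCF | OGF << 10, little endian)
 *   byte 2:   plen
 *   byte 3..: parameters
 *
 * For example, LE Set Advertise Enable (opcode 0x200a) with the single
 * parameter byte 0x01 would be queued as the bytes: 0a 20 01 01.
 */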

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

static int hci_req_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

	hci_request_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_rsp;
	hdev->req_rsp = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* If the command returned a status event, skb will be NULL as there
	 * are no parameters.
	 */
	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
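
/* Illustrative example (not part of the original file): a caller that does
 * not already hold req_lock would use hci_cmd_sync() and free the returned
 * skb itself. The opcode and response struct below are the ones defined in
 * <net/bluetooth/hci.h>; this is a sketch, not kernel code.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (void *)skb->data;
	bt_dev_info(hdev, "HCI ver %u rev %u", rp->hci_ver,
		    le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}
#endif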

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);

	/* If the command returns a status event, skb will be set to -ENODATA */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb)) {
		if (!event)
			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
				   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			const void *param, u32 timeout)
{
	int err;

	hci_req_sync_lock(hdev);
	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_status);
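
/* Illustrative note (not part of the original file): the *_status() variants
 * collapse the response to a single return code, so callers must distinguish
 * three cases, sketched below with a real opcode from <net/bluetooth/hci.h>:
 */
#if 0
	err = hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
				  HCI_CMD_TIMEOUT);
	if (err < 0)
		;	/* transport/errno failure, e.g. -ETIMEDOUT */
	else if (err > 0)
		;	/* controller-reported HCI status (skb->data[0]) */
	else
		;	/* success */
#endif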

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	/* If we were running an LE-only scan, change the discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously, and
	 * BR/EDR inquiry is already finished, stop discovery; otherwise
	 * BR/EDR inquiry will stop discovery when finished. If we are
	 * resolving a remote device name, do not change the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		goto _return;

	if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		goto _return;
	}

	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status %d", status);
		goto discov_stopped;
	}

	goto _return;

discov_stopped:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
	hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup);

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	if (hdev->cur_adv_instance) {
		return hci_schedule_adv_instance_sync(hdev,
						      hdev->cur_adv_instance,
						      true);
	} else {
		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	}

	return 0;
}

static void reenable_adv(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    reenable_adv_work);
	int status;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
	if (status)
		bt_dev_err(hdev, "failed to reenable ADV: %d", status);

	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next_instance && !ext_adv_capable(hdev))
		return hci_schedule_adv_instance_sync(hdev,
						      next_instance->instance,
						      false);

	return 0;
}

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	hci_clear_adv_instance_sync(hdev, NULL, instance, false);

	if (list_empty(&hdev->adv_instances))
		return hci_disable_advertising_sync(hdev);

	return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
	u8 *inst_ptr;
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	if (hdev->cur_adv_instance == 0x00)
		goto unlock;

	inst_ptr = kmalloc(1, GFP_KERNEL);
	if (!inst_ptr)
		goto unlock;

	*inst_ptr = hdev->cur_adv_instance;
	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static int hci_passive_scan_sync(struct hci_dev *hdev);

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_passive_scan_sync(hdev);

	hci_dev_lock(hdev);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		bt_dev_err(hdev, "unexpected error");
	}

	hci_dev_unlock(hdev);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
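
/* Illustrative note (not part of the original file): this work item
 * implements a duty cycle for advertisement monitoring, alternating passive
 * scanning with the accept-list filter for advmon_allowlist_duration ms and
 * unfiltered scanning for advmon_no_filter_duration ms, rescheduling itself
 * until the state is reset to INTERLEAVE_SCAN_NONE.
 */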

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
				       struct hci_cmd_sync_work_entry *entry,
				       int err)
{
	if (entry->destroy)
		entry->destroy(hdev, entry->data, err);

	list_del(&entry->list);
	kfree(entry);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	cancel_work_sync(&hdev->cmd_sync_work);
	cancel_work_sync(&hdev->reenable_adv_work);

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}

void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

/* Cancel ongoing command request synchronously:
 *
 * - Set result and mark status to HCI_REQ_CANCELED
 * - Wake up the command sync thread
 */
void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		/* req_result is __u32 so error must be positive to be properly
		 * propagated.
		 */
		hdev->req_result = err < 0 ? -err : err;
		hdev->req_status = HCI_REQ_CANCELED;

		wake_up_interruptible(&hdev->req_wait_q);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
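
/* Illustrative note (not part of the original file): because req_result is an
 * unsigned __u32, the error is stored positive here and negated again in
 * __hci_cmd_sync_sk(). E.g. hci_cmd_sync_cancel_sync(hdev, -ECANCELED) stores
 * req_result = ECANCELED, and the waiter's HCI_REQ_CANCELED branch then
 * returns -ECANCELED to the caller.
 */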

/* Submit HCI command to be run in cmd_sync_work:
 *
 * - hdev must _not_ be unregistered
 */
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	int err = 0;

	mutex_lock(&hdev->unregister_lock);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		err = -ENODEV;
		goto unlock;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

unlock:
	mutex_unlock(&hdev->unregister_lock);
	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_submit);

/* Queue HCI command:
 *
 * - hdev must be running
 */
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue the command if hdev is running, which means it had been
	 * opened and is either in the init phase or is already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
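
/* Illustrative example (not part of the original file): a typical caller
 * queues a callback together with heap-allocated context and lets the destroy
 * hook free it once the callback has run (or when the entry is canceled).
 * The example_* names below are made up for the sketch.
 */
#if 0
static int example_sync(struct hci_dev *hdev, void *data)
{
	u8 *instance = data;

	return hci_update_adv_data_sync(hdev, *instance);
}

static void example_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int example_queue(struct hci_dev *hdev, u8 instance)
{
	u8 *data = kmalloc(1, GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	*data = instance;
	return hci_cmd_sync_queue(hdev, example_sync, data, example_destroy);
}
#endif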

static struct hci_cmd_sync_work_entry *
_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			   void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (func && entry->func != func)
			continue;

		if (data && entry->data != data)
			continue;

		if (destroy && entry->destroy != destroy)
			continue;

		return entry;
	}

	return NULL;
}

/* Queue HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create a
 *   new entry and queue it.
 */
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			    void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_queue(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);

/* Run HCI command:
 *
 * - hdev must be running
 * - if on cmd_sync_work then run immediately, otherwise queue
 */
int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		     void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue the command if hdev is running, which means it had been
	 * opened and is either in the init phase or is already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	/* If on cmd_sync_work then run immediately, otherwise queue */
	if (current_work() == &hdev->cmd_sync_work)
		return func(hdev, data);

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run);

/* Run HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create a
 *   new entry and run it.
 * - if on cmd_sync_work then run immediately, otherwise queue
 */
int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_run(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run_once);

/* Lookup HCI command entry:
 *
 * - Return the first entry that matches by function callback or data or
 *   destroy callback.
 */
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	mutex_lock(&hdev->cmd_sync_work_lock);
	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return entry;
}
EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);

/* Cancel HCI command entry */
void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
			       struct hci_cmd_sync_work_entry *entry)
{
	mutex_lock(&hdev->cmd_sync_work_lock);
	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);

/* Dequeue one HCI command entry:
 *
 * - Look up and cancel the first entry that matches.
 */
bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
			       hci_cmd_sync_work_func_t func,
			       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	if (!entry)
		return false;

	hci_cmd_sync_cancel_entry(hdev, entry);

	return true;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);

/* Dequeue HCI command entry:
 *
 * - Look up and cancel any entry that matches by function callback or data or
 *   destroy callback.
 */
bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	bool ret = false;

	mutex_lock(&hdev->cmd_sync_work_lock);
	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
						   destroy))) {
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
		ret = true;
	}
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return ret;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue);

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}
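
/* Illustrative note (not part of the original file): the Class of Device is a
 * 24-bit little-endian field, so cod[] above is sent as minor, major, service
 * bytes. Setting 0x20 in cod[1] raises CoD bit 13, the Limited Discoverable
 * Mode service class bit. E.g. (values made up for the example):
 *
 *   cod[0] = 0x0c, cod[1] = 0x01, cod[2] = 0x1a  ->  CoD 0x1a010c
 */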

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
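
/* Illustrative helper (not in the kernel): the masks above index into the
 * LE supported states bitmap, where state bit n maps onto byte n / 8, bit
 * n % 8 -- e.g. bit 20 is le_states[2] & 0x10 and bit 38 is
 * le_states[4] & 0x40.
 */
#if 0
static bool le_state_supported(const u8 le_states[8], unsigned int bit)
{
	return le_states[bit / 8] & BIT(bit % 8);
}
#endif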

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If a random address has been set and we're advertising or
	 * initiating an LE connection, we can't go ahead and change the
	 * random address at this time. This is because the eventual
	 * initiator address used for the subsequently created connection
	 * will be undefined (some controllers use the new address and
	 * others the one we had when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	     hci_lookup_le_connect(hdev))) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}

int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (rpa) {
		/* If the controller supports LL Privacy, the own address
		 * type is 0x03.
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
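
/* Illustrative note (not part of the original file): random LE addresses are
 * distinguished by the two most significant bits of the most significant byte
 * (nrpa.b[5], since bdaddr_t stores the address little-endian):
 *
 *   00 -> non-resolvable private address (what "&= 0x3f" produces above)
 *   01 -> resolvable private address (RPA)
 *   11 -> static random address
 */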

static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;
	struct adv_info *adv = NULL;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = adv ? adv->handle : instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info; instead it uses
		 * hdev->random_addr to track its address, so whenever it
		 * needs to be updated this also sets the random address,
		 * since hdev->random_addr is shared with the scan state
		 * machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int
hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv,
			    const struct hci_cp_le_set_ext_adv_params *cp,
			    struct hci_rp_le_set_ext_adv_params *rp)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp),
			     cp, HCI_CMD_TIMEOUT);

	/* If the command returns a status event, skb will be set to -ENODATA */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld",
			   HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*rp)) {
		bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u",
			   HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len);
		kfree_skb(skb);
		return -EIO;
	}

	memcpy(rp, skb->data, sizeof(*rp));
	kfree_skb(skb);

	if (!rp->status) {
		hdev->adv_addr_type = cp->own_addr_type;
		if (!cp->handle) {
			/* Store in hdev for instance 0 */
			hdev->adv_tx_power = rp->tx_power;
		} else if (adv) {
			adv->tx_power = rp->tx_power;
		}
	}

	return rp->status;
}

static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->adv_data_changed)
			return 0;
	}

	len = eir_create_adv_data(hdev, instance, pdu->data,
				  HCI_MAX_EXT_AD_LENGTH);

	pdu->length = len;
	pdu->handle = adv ? adv->handle : instance;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Update data if the command succeeds */
	if (adv) {
		adv->adv_data_changed = false;
	} else {
		memcpy(hdev->adv_data, pdu->data, len);
		hdev->adv_data_len = len;
	}

	return 0;
}
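
/* Illustrative note (not part of the original file): DEFINE_FLEX() declares
 * an on-stack pdu whose flexible "data" array can hold HCI_MAX_EXT_AD_LENGTH
 * bytes, while struct_size(pdu, data, len) computes the header plus only the
 * len bytes actually used, so the HCI command is sized to the real payload:
 */
#if 0
	/* effectively: the fixed header fields plus len data bytes */
	size_t cmd_len = struct_size(pdu, data, len);
#endif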

static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return 0;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_adv_data_sync(hdev, instance);

	return hci_set_adv_data_sync(hdev, instance);
}

int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_rp_le_set_ext_adv_params rp;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
		cp.sid = adv->sid;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		cp.sid = 0x00;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer's Identity Address and the Peer_Address_Type
	 * parameter contains the peer's Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate their own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = adv ? adv->handle : instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp);
	if (err)
		return err;

	/* Update adv data as tx power is known now */
	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if the random address needs to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu->data);

	pdu->handle = adv ? adv->handle : instance;
	pdu->length = len;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu->data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = adv ? adv->handle : instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}
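
/* Illustrative arithmetic (not part of the original file): adv->timeout is in
 * seconds and the controller expects duration in units of 10 ms, so a 5 s
 * timeout becomes duration = 5 * MSEC_PER_SEC = 5000 and set->duration = 500,
 * i.e. Time = 500 * 10 ms = 5 s.
 */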

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}

int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already disabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (!adv || !adv->periodic || !adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
		    HCI_MAX_PER_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu->data);

	pdu->length = len;
	pdu->handle = adv ? adv->handle : instance;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     struct_size(pdu, data, len), pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already enabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (adv && adv->periodic && adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if periodic advertising data contains a Basic Announcement and, if
 * it does, generates a Broadcast ID and adds a Broadcast Announcement.
 */
static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[HCI_MAX_EXT_AD_LENGTH];
	u8 len;

	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
	 * advertising, so it cannot be used for the likes of Broadcast
	 * Announcement as it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there
	 * is nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if the advertising data already has a Broadcast Announcement,
	 * since the process may want to control the Broadcast ID directly and
	 * in that case the kernel shall not interfere.
	 */
	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
				 NULL))
		return 0;

	/* Generate Broadcast ID */
	get_random_bytes(bid, sizeof(bid));
	len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
	memcpy(ad + len, adv->adv_data, adv->adv_data_len);
	hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len,
				  ad, 0, NULL);

	return hci_update_adv_data_sync(hdev, adv->instance);
}

int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid,
			   u8 data_len, u8 *data, u32 flags, u16 min_interval,
			   u16 max_interval, u16 sync_interval)
{
	struct adv_info *adv = NULL;
	int err;
	bool added = false;

	hci_disable_per_advertising_sync(hdev, instance);

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (adv) {
			if (sid != HCI_SID_INVALID && adv->sid != sid) {
				/* If the SID doesn't match, attempt to find
				 * by SID.
				 */
				adv = hci_find_adv_sid(hdev, sid);
				if (!adv) {
					bt_dev_err(hdev,
						   "Unable to find adv_info");
					return -EINVAL;
				}
			}

			/* Turn it into periodic advertising */
			adv->periodic = true;
			adv->per_adv_data_len = data_len;
			if (data)
				memcpy(adv->per_adv_data, data, data_len);
			adv->flags = flags;
		} else if (!adv) {
			/* Create an instance if that could not be found */
			adv = hci_add_per_instance(hdev, instance, sid, flags,
						   data_len, data,
						   sync_interval,
						   sync_interval);
			if (IS_ERR(adv))
				return PTR_ERR(adv);
			adv->pending = false;
			added = true;
		}
	}

	/* Start advertising */
	err = hci_start_ext_adv_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_adv_bcast_annoucement(hdev, adv);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
					  max_interval);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_data_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_enable_per_advertising_sync(hdev, instance);
	if (err < 0)
		goto fail;

	return 0;

fail:
	if (added)
		hci_remove_adv_instance(hdev, instance);

	return err;
}

static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;
	u8 status;

	if (ext_adv_capable(hdev))
		return hci_enable_ext_advertising_sync(hdev,
						       hdev->cur_adv_instance);

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EINVAL;

	status = hci_disable_advertising_sync(hdev);
	if (status)
		return status;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	status = hci_update_random_address_sync(hdev, !connectable,
						adv_use_rpa(hdev, flags),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int enable_advertising_sync(struct hci_dev *hdev, void *data)
{
	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
}

int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				     struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	err = hci_disable_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
					sizeof(instance), &instance, 0,
					HCI_CMD_TIMEOUT, sk);
}

int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
{
	struct hci_cp_le_term_big cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				   bool force)
{
	struct adv_info *adv = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
		timeout = adv->duration;
	else
		timeout = adv->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv->timeout)
		adv->remaining_time = adv->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   secs_to_jiffies(timeout));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;

	return hci_start_adv_sync(hdev, instance);
}
2001
2002
static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
2003
{
2004
int err;
2005
2006
if (!ext_adv_capable(hdev))
2007
return 0;
2008
2009
/* Disable instance 0x00 to disable all instances */
2010
err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2011
if (err)
2012
return err;
2013
2014
return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
2015
0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2016
}
2017
2018
static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
2019
{
2020
struct adv_info *adv, *n;
2021
2022
if (ext_adv_capable(hdev))
2023
/* Remove all existing sets */
2024
return hci_clear_adv_sets_sync(hdev, sk);
2025
2026
/* This is safe as long as there is no command send while the lock is
2027
* held.
2028
*/
2029
hci_dev_lock(hdev);
2030
2031
/* Cleanup non-ext instances */
2032
list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
2033
u8 instance = adv->instance;
2034
int err;
2035
2036
if (!(force || adv->timeout))
2037
continue;
2038
2039
err = hci_remove_adv_instance(hdev, instance);
2040
if (!err)
2041
mgmt_advertising_removed(sk, hdev, instance);
2042
}
2043
2044
hci_dev_unlock(hdev);
2045
2046
return 0;
2047
}
2048
2049
static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
2050
struct sock *sk)
2051
{
2052
int err;
2053
2054
/* If we use extended advertising, instance has to be removed first. */
2055
if (ext_adv_capable(hdev))
2056
return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
2057
2058
/* This is safe as long as there is no command send while the lock is
2059
* held.
2060
*/
2061
hci_dev_lock(hdev);
2062
2063
err = hci_remove_adv_instance(hdev, instance);
2064
if (!err)
2065
mgmt_advertising_removed(sk, hdev, instance);
2066
2067
hci_dev_unlock(hdev);
2068
2069
return err;
2070
}
2071
2072
/* For a single instance:
2073
* - force == true: The instance will be removed even when its remaining
2074
* lifetime is not zero.
2075
* - force == false: the instance will be deactivated but kept stored unless
2076
* the remaining lifetime is zero.
2077
*
2078
* For instance == 0x00:
2079
* - force == true: All instances will be removed regardless of their timeout
2080
* setting.
2081
* - force == false: Only instances that have a timeout will be removed.
2082
*/
2083
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
2084
u8 instance, bool force)
2085
{
2086
struct adv_info *next = NULL;
2087
int err;
2088
2089
/* Cancel any timeout concerning the removed instance(s). */
2090
if (!instance || hdev->cur_adv_instance == instance)
2091
cancel_adv_timeout(hdev);
2092
2093
/* Get the next instance to advertise BEFORE we remove
2094
* the current one. This can be the same instance again
2095
* if there is only one instance.
2096
*/
2097
if (hdev->cur_adv_instance == instance)
2098
next = hci_get_next_instance(hdev, instance);
2099
2100
if (!instance) {
2101
err = hci_clear_adv_sync(hdev, sk, force);
2102
if (err)
2103
return err;
2104
} else {
2105
struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2106
2107
if (force || (adv && adv->timeout && !adv->remaining_time)) {
2108
/* Don't advertise a removed instance. */
2109
if (next && next->instance == instance)
2110
next = NULL;
2111
2112
err = hci_remove_adv_sync(hdev, instance, sk);
2113
if (err)
2114
return err;
2115
}
2116
}
2117
2118
if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2119
return 0;
2120
2121
if (next && !ext_adv_capable(hdev))
2122
hci_schedule_adv_instance_sync(hdev, next->instance, false);
2123
2124
return 0;
2125
}
2126
2127
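/* Read the RSSI of the connection identified by @handle. */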
int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_read_rssi cp;

	cp.handle = handle;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
				     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
}

int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
{
	struct hci_cp_read_tx_power cp;

	cp.handle = handle;
	cp.type = type;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

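/* Disable advertising: with extended advertising, disabling instance 0x00
 * disables all advertising sets; otherwise the legacy enable command is
 * sent with a value of 0x00.
 */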
int hci_disable_advertising_sync(struct hci_dev *hdev)
{
	u8 enable = 0x00;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_disable_ext_adv_instance_sync(hdev, 0x00);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
					   u8 filter_dup)
{
	struct hci_cp_le_set_ext_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup)
{
	struct hci_cp_le_set_scan_enable cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (val && hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!ll_privacy_capable(hdev))
		return 0;

	/* If the controller is already in the requested resolving state we
	 * are done.
	 */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Returns true if an interleave scan was started by this call, otherwise
 * false.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		hci_start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* Removes connection from the resolving list if needed. */
static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_resolv_list cp;
	struct bdaddr_list_with_irk *entry;

	if (!ll_privacy_capable(hdev))
		return 0;

	/* Check if the IRK has been programmed */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
						bdaddr_type);
	if (!entry)
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
				       bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;
	int err;

	/* Check if device is on accept list before removing it */
	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	/* Ignore errors when removing from the resolving list, as it is
	 * likely that the device was never added.
	 */
	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
		return err;
	}

	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

struct conn_params {
	bdaddr_t addr;
	u8 addr_type;
	hci_conn_flags_t flags;
	u8 privacy_mode;
};

/* Adds connection to the resolving list if needed.
 * Setting params to NULL programs the local hdev->irk.
 */
static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_add_to_resolv_list cp;
	struct smp_irk *irk;
	struct bdaddr_list_with_irk *entry;
	struct hci_conn_params *p;

	if (!ll_privacy_capable(hdev))
		return 0;

	/* Attempt to program local identity address, type and irk if params is
	 * NULL.
	 */
	if (!params) {
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return 0;

		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
		memcpy(cp.peer_irk, hdev->irk, 16);
		goto done;
	} else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
		return 0;

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	/* Check if the IRK has _not_ been programmed yet. */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
						&params->addr,
						params->addr_type);
	if (entry)
		return 0;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);
	memcpy(cp.peer_irk, irk->val, 16);

	/* Default privacy mode is always Network */
	params->privacy_mode = HCI_NETWORK_PRIVACY;

	rcu_read_lock();
	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
				      &params->addr, params->addr_type);
	if (!p)
		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &params->addr, params->addr_type);
	if (p)
		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
	rcu_read_unlock();

done:
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		memcpy(cp.local_irk, hdev->irk, 16);
	else
		memset(cp.local_irk, 0, 16);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set Device Privacy Mode. */
static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_set_privacy_mode cp;
	struct smp_irk *irk;

	if (!ll_privacy_capable(hdev) ||
	    !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
		return 0;

	/* If device privacy mode has already been set there is nothing to do */
	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
		return 0;

	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
	 * indicates that LL Privacy has been enabled and
	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
	 */
	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
		return 0;

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.bdaddr_type = irk->addr_type;
	bacpy(&cp.bdaddr, &irk->bdaddr);
	cp.mode = HCI_DEVICE_PRIVACY;

	/* Note: params->privacy_mode is not updated since it is a copy */

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Adds connection to the allow list if needed. If the device uses an RPA
 * (has an IRK), this also attempts to program the device in the resolving
 * list and to set the privacy mode properly.
 */
static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
				       struct conn_params *params,
				       u8 *num_entries)
{
	struct hci_cp_le_add_to_accept_list cp;
	int err;

	/* During suspend, only wakeable devices can be in acceptlist */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
		hci_le_del_accept_list_sync(hdev, &params->addr,
					    params->addr_type);
		return 0;
	}

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -ENOSPC;

	/* Attempt to program the device in the resolving list first, to avoid
	 * having to roll back in case a later step fails; since the resolving
	 * list is dynamic it can probably be smaller than the accept list.
	 */
	err = hci_le_add_resolve_list_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
		return err;
	}

	/* Set Privacy Mode */
	err = hci_le_set_privacy_mode_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
		return err;
	}

	/* Check if already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
		/* Rollback the device from the resolving list */
		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
		return err;
	}

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

/* This function disables/pauses all advertising instances */
static int hci_pause_advertising_sync(struct hci_dev *hdev)
{
	int err;
	int old_state;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	/* If advertising has already been paused there is nothing to do. */
	if (hdev->advertising_paused)
		return 0;

	bt_dev_dbg(hdev, "Pausing directed advertising");

	/* Stop directed advertising */
	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
	if (old_state) {
		/* When discoverable timeout triggers, then just make sure
		 * the limited discoverable flag is cleared. Even in the case
		 * of a timeout triggered from general discoverable, it is
		 * safe to unconditionally clear the flag.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hdev->discov_timeout = 0;
	}

	bt_dev_dbg(hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	err = hci_disable_advertising_sync(hdev);
	if (err)
		return err;

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(hdev))
		cancel_adv_timeout(hdev);

	hdev->advertising_paused = true;
	hdev->advertising_old_state = old_state;

	return 0;
}

/* This function enables all user advertising instances */
static int hci_resume_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	/* If advertising has not been paused there is nothing to do. */
	if (!hdev->advertising_paused)
		return 0;

	/* Resume directed advertising */
	hdev->advertising_paused = false;
	if (hdev->advertising_old_state) {
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
		hdev->advertising_old_state = 0;
	}

	bt_dev_dbg(hdev, "Resuming advertising instances");

	if (ext_adv_capable(hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
			err = hci_enable_ext_advertising_sync(hdev,
							      adv->instance);
			if (!err)
				continue;

			/* If the instance cannot be resumed remove it */
			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
							 NULL);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		err = hci_schedule_adv_instance_sync(hdev,
						     hdev->cur_adv_instance,
						     true);
	}

	hdev->advertising_paused = false;

	return err;
}

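/* Disable LL privacy address resolution. Advertising is paused first since
 * the controller does not allow toggling address resolution while it is
 * advertising, scanning or initiating an LE connection.
 */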
static int hci_pause_addr_resolution(struct hci_dev *hdev)
{
	int err;

	if (!ll_privacy_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	/* Cannot disable addr resolution if scanning is enabled or
	 * when initiating an LE connection.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
		return -EPERM;
	}

	/* Cannot disable addr resolution if advertising is enabled. */
	err = hci_pause_advertising_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Pause advertising failed: %d", err);
		return err;
	}

	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err)
		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
			   err);

	/* Return if address resolution was successfully disabled and an RPA
	 * is used for scanning; otherwise resume advertising.
	 */
	if (!err && scan_use_rpa(hdev))
		return 0;

	hci_resume_advertising_sync(hdev);
	return err;
}

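/* Read the local Out Of Band data, using the extended variant of the
 * command when @extended is true, and return the response skb to the
 * caller.
 */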
struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
					     bool extended, struct sock *sk)
{
	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
		HCI_OP_READ_LOCAL_OOB_DATA;

	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

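/* Take an RCU snapshot of the hci_conn_params on @list into a flat array
 * so callers can iterate it while waiting for HCI events, during which the
 * original list may be mutated; *n is set to the number of entries copied.
 */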
static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
{
	struct hci_conn_params *params;
	struct conn_params *p;
	size_t i;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action)
		++i;
	*n = i;

	rcu_read_unlock();

	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
	if (!p)
		return NULL;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action) {
		/* Racing adds are handled in next scan update */
		if (i >= *n)
			break;

		/* No hdev->lock, but: addr, addr_type are immutable.
		 * privacy_mode is only written by us or in
		 * hci_cc_le_set_privacy_mode that we wait for.
		 * We should be idempotent so MGMT updating flags
		 * while we are processing is OK.
		 */
		bacpy(&p[i].addr, &params->addr);
		p[i].addr_type = params->addr_type;
		p[i].flags = READ_ONCE(params->flags);
		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
		++i;
	}

	rcu_read_unlock();

	*n = i;
	return p;
}

/* Clear LE Accept List */
static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x80))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Device must not be scanning when updating the accept list.
 *
 * Update is done using the following sequence:
 *
 * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) ->
 * Remove Devices From Accept List ->
 * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) ->
 * Add Devices to Accept List ->
 * (has IRK && ll_privacy_capable(Add Devices to Resolving List)) ->
 * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * In case of failure advertising shall be restored to its original state
 * and the returned filter policy shall disable the accept list, since
 * either the accept list or the resolving list could not be programmed.
 */
static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
{
	struct conn_params *params;
	struct bdaddr_list *b, *t;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	u8 filter_policy;
	size_t i, n;
	int err;

	/* Pause advertising if resolving list can be used as controllers
	 * cannot accept resolving list modifications while advertising.
	 */
	if (ll_privacy_capable(hdev)) {
		err = hci_pause_advertising_sync(hdev);
		if (err) {
			bt_dev_err(hdev, "pause advertising failed: %d", err);
			return 0x00;
		}
	}

	/* Disable address resolution while reprogramming accept list since
	 * devices that do have an IRK will be programmed in the resolving list
	 * when LL Privacy is enabled.
	 */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
		goto done;
	}

	/* Force address filtering if PA Sync is in progress */
	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_create_pa_sync(hdev);
		if (conn) {
			struct conn_params pa;

			memset(&pa, 0, sizeof(pa));

			bacpy(&pa.addr, &conn->dst);
			pa.addr_type = conn->dst_type;

			/* Clear first since there could be addresses left
			 * behind.
			 */
			hci_le_clear_accept_list_sync(hdev);

			num_entries = 1;
			err = hci_le_add_accept_list_sync(hdev, &pa,
							  &num_entries);
			goto done;
		}
	}

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is connected or is
	 * still in the list of pending connections or list of devices to
	 * report. If not present in either list, then remove it from
	 * the controller.
	 */
	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
			continue;

		/* Pointers not dereferenced, no locks needed */
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the acceptlist.
		 */
		if (!pend_conn && !pend_report) {
			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
						    b->bdaddr_type);
			continue;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * accept list.
	 *
	 * The list and params may be mutated while we wait for events,
	 * so make a copy and iterate it.
	 */

	params = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */

	params = conn_params_copy(&hdev->pend_le_reports, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		err = -EINVAL;

done:
	filter_policy = err ? 0x00 : 0x01;

	/* Enable address resolution when LL Privacy is enabled. */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	if (err)
		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);

	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* Select filter policy to use accept list */
	return filter_policy;
}

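/* Helper to fill in the per-PHY parameters of an extended scan request. */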
static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
				   u8 type, u16 interval, u16 window)
{
	cp->type = type;
	cp->interval = cpu_to_le16(interval);
	cp->window = cpu_to_le16(window);
}

static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
					  u16 interval, u16 window,
					  u8 own_addr_type, u8 filter_policy)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_cp_le_scan_phy_params *phy;
	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
	u8 num_phy = 0x00;

	cp = (void *)data;
	phy = (void *)cp->data;

	memset(data, 0, sizeof(data));

	cp->own_addr_type = own_addr_type;
	cp->filter_policy = filter_policy;

	/* Check if PA Sync is in progress then select the PHY based on the
	 * hci_conn.iso_qos.
	 */
	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		struct hci_cp_le_add_to_accept_list *sent;

		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
		if (sent) {
			struct hci_conn *conn;

			conn = hci_conn_hash_lookup_ba(hdev, PA_LINK,
						       &sent->bdaddr);
			if (conn) {
				struct bt_iso_qos *qos = &conn->iso_qos;

				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
					cp->scanning_phys |= LE_SCAN_PHY_1M;
					hci_le_scan_phy_params(phy, type,
							       interval,
							       window);
					num_phy++;
					phy++;
				}

				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
					cp->scanning_phys |= LE_SCAN_PHY_CODED;
					hci_le_scan_phy_params(phy, type,
							       interval * 3,
							       window * 3);
					num_phy++;
					phy++;
				}

				if (num_phy)
					goto done;
			}
		}
	}

	if (scan_1m(hdev) || scan_2m(hdev)) {
		cp->scanning_phys |= LE_SCAN_PHY_1M;
		hci_le_scan_phy_params(phy, type, interval, window);
		num_phy++;
		phy++;
	}

	if (scan_coded(hdev)) {
		cp->scanning_phys |= LE_SCAN_PHY_CODED;
		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
		num_phy++;
		phy++;
	}

done:
	if (!num_phy)
		return -EINVAL;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
				     sizeof(*cp) + sizeof(*phy) * num_phy,
				     data, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
				      u16 interval, u16 window,
				      u8 own_addr_type, u8 filter_policy)
{
	struct hci_cp_le_set_scan_param cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
						      window, own_addr_type,
						      filter_policy);

	memset(&cp, 0, sizeof(cp));
	cp.type = type;
	cp.interval = cpu_to_le16(interval);
	cp.window = cpu_to_le16(window);
	cp.own_address_type = own_addr_type;
	cp.filter_policy = filter_policy;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

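/* Program the scan parameters and enable scanning, unless scanning is
 * currently paused for suspend.
 */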
static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       u8 filter_dup)
{
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
					 own_addr_type, filter_policy);
	if (err)
		return err;

	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
}

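/* Start LE passive scanning: disable any running scan, update the random
 * address and the accept list, pick interval/window based on the current
 * state (suspend, connecting, ADV monitoring) and enable scanning again.
 */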
static int hci_passive_scan_sync(struct hci_dev *hdev)
{
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "disable scanning failed: %d", err);
		return err;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
					   &own_addr_type))
		return 0;

	if (hdev->enable_advmon_interleave_scan &&
	    hci_update_interleaved_scan_sync(hdev))
		return 0;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);

	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = hci_update_accept_list_sync(hdev);

	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
	 * passive scanning cannot be started since that would require the host
	 * to be woken up to process the reports.
	 */
	if (hdev->suspended && !filter_policy) {
		/* If the accept list is empty there is no need to scan
		 * while suspended.
		 */
		if (list_empty(&hdev->le_accept_list))
			return 0;

		/* If there are devices in the accept_list, it means some
		 * devices could not be programmed, which in the non-suspended
		 * case means filter_policy needs to be set to 0x00 so the
		 * host does the filtering. But since we are treating the
		 * suspended case here, we can ignore the devices that need
		 * host filtering, so that devices in the acceptlist are able
		 * to wake up the system.
		 */
		filter_policy = 0x01;
	}

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no acceptlist)
	 * and 0x01 (acceptlist enabled) use the new filter policies
	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	/* Disable all filtering for Mesh */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		filter_policy = 0;
		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
	}

	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);

	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
				   own_addr_type, filter_policy, filter_dups);
}

/* This function controls the passive scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it in the following sequence:
 *
 * If there are devices to scan:
 *
 * Disable Scanning -> Update Accept List ->
 * ll_privacy_capable((Disable Advertising) -> Disable Resolving List ->
 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * Otherwise:
 *
 * Disable Scanning
 */
int hci_update_passive_scan_sync(struct hci_dev *hdev)
{
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return 0;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		bt_dev_dbg(hdev, "stopping background scanning");

		err = hci_scan_disable_sync(hdev);
		if (err)
			bt_dev_err(hdev, "stop background scanning failed: %d",
				   err);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return 0;

		bt_dev_dbg(hdev, "start background scanning");

		err = hci_passive_scan_sync(hdev);
		if (err)
			bt_dev_err(hdev, "start background scanning failed: %d",
				   err);
	}

	return err;
}

static int update_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_scan_sync(hdev);
}

int hci_update_scan(struct hci_dev *hdev)
{
	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
}

static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

int hci_update_passive_scan(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
				       NULL);
}

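/* Write the Secure Connections host support setting and mirror the result
 * into the cached host features and the HCI_SC_ENABLED flag.
 */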
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
{
	int err;

	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
		return 0;

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(val), &val, HCI_CMD_TIMEOUT);

	if (!err) {
		if (val) {
			hdev->features[1][0] |= LMP_HOST_SC;
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SC;
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		}
	}

	return err;
}

int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
{
	int err;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    lmp_host_ssp_capable(hdev))
		return 0;

	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return hci_write_sc_support_sync(hdev, 0x01);
}

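/* Write the LE and Simultaneous LE+BR/EDR host support bits, skipping the
 * command when the cached host features already match.
 */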
int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
{
	struct hci_cp_write_le_host_supported cp;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    !lmp_bredr_capable(hdev))
		return 0;

	/* Check first if we already have the right host state
	 * (host features set)
	 */
	if (le == lmp_host_le_capable(hdev) &&
	    simul == lmp_host_le_br_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.le = le;
	cp.simul = simul;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_powered_update_adv_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If RPA Resolution has not been enabled yet it means the
	 * resolving list is empty and we should attempt to program the
	 * local IRK in order to support using own_addr_type
	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
	 */
	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		hci_le_add_resolve_list_sync(hdev, NULL);
		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	}

	/* Make sure the controller has a good default for
	 * advertising data. This also applies to the case
	 * where BR/EDR was toggled during the AUTO_OFF phase.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev)) {
			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			err = hci_update_adv_data_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hci_enable_advertising_sync(hdev);
	}

	/* Call for each tracked instance to be scheduled */
	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}

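/* Sync the Authentication Enable setting with the HCI_LINK_SECURITY flag. */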
static int hci_write_auth_enable_sync(struct hci_dev *hdev)
{
	u8 link_sec;

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
				     sizeof(link_sec), &link_sec,
				     HCI_CMD_TIMEOUT);
}

int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
{
	struct hci_cp_write_page_scan_activity cp;
	u8 type;
	int err = 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		cp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	cp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
		if (err)
			return err;
	}

	if (hdev->page_scan_type != type)
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
					    sizeof(type), &type,
					    HCI_CMD_TIMEOUT);

	return err;
}

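/* Returns true if the BR/EDR accept list contains at least one entry
 * without a fully established ACL connection, in which case page scan
 * needs to stay enabled so that those devices can reconnect.
 */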
static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				     sizeof(val), &val,
				     HCI_CMD_TIMEOUT);
}

int hci_update_scan_sync(struct hci_dev *hdev)
{
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (!hdev_is_powered(hdev))
		return 0;

	if (mgmt_powering_down(hdev))
		return 0;

	if (hdev->scanning_paused)
		return 0;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return 0;

	return hci_write_scan_enable_sync(hdev, scan);
}

int hci_update_name_sync(struct hci_dev *hdev, const u8 *name)
{
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.name, name, sizeof(cp.name));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
				     sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

/* This function performs the powered update HCI command sequence after the
 * HCI init sequence, which ends up resetting all states. The sequence is as
 * follows:
 *
 * HCI_SSP_ENABLED(Enable SSP)
 * HCI_LE_ENABLED(Enable LE)
 * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) ->
 * Update adv data)
 * Enable Authentication
 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
 * Set Name -> Set EIR)
 * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
 */
int hci_powered_update_sync(struct hci_dev *hdev)
{
	int err;

	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	err = hci_write_ssp_mode_sync(hdev, 0x01);
	if (err)
		return err;

	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
	if (err)
		return err;

	err = hci_powered_update_adv_sync(hdev);
	if (err)
		return err;

	err = hci_write_auth_enable_sync(hdev);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			hci_write_fast_connectable_sync(hdev, true);
		else
			hci_write_fast_connectable_sync(hdev, false);
		hci_update_scan_sync(hdev);
		hci_update_class_sync(hdev);
		hci_update_name_sync(hdev, hdev->dev_name);
		hci_update_eir_sync(hdev);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
	}

	return 0;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	if (hci_test_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN))
		baswap(&hdev->public_addr, &ba);
	else
		bacpy(&hdev->public_addr, &ba);
}

struct hci_init_stage {
	int (*func)(struct hci_dev *hdev);
};

/* Run init stage NULL terminated function table */
static int hci_init_stage_sync(struct hci_dev *hdev,
			       const struct hci_init_stage *stage)
{
	size_t i;

	for (i = 0; stage[i].func; i++) {
		int err;

		err = stage[i].func(hdev);
		if (err)
			return err;
	}

	return 0;
}

/* Read Local Version */
static int hci_read_local_version_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read BD Address */
static int hci_read_bd_addr_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
				     0, NULL, HCI_CMD_TIMEOUT);
}

#define HCI_INIT(_func) \
{ \
	.func = _func, \
}

static const struct hci_init_stage hci_init0[] = {
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_BD_ADDR */
	HCI_INIT(hci_read_bd_addr_sync),
	{}
};

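/* Reset the controller: set the HCI_RESET flag and issue HCI_OP_RESET. */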
int hci_reset_sync(struct hci_dev *hdev)
{
	int err;

	set_bit(HCI_RESET, &hdev->flags);

	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return 0;
}

static int hci_init0_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	/* Reset */
	if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) {
		err = hci_reset_sync(hdev);
		if (err)
			return err;
	}

	return hci_init_stage_sync(hdev, hci_init0);
}

static int hci_unconf_init_sync(struct hci_dev *hdev)
{
	int err;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return 0;

	err = hci_init0_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

/* Read Local Supported Features. */
static int hci_read_local_features_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* BR Controller init stage 1 command sequence */
static const struct hci_init_stage br_init1[] = {
	/* HCI_OP_READ_LOCAL_FEATURES */
	HCI_INIT(hci_read_local_features_sync),
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_BD_ADDR */
	HCI_INIT(hci_read_bd_addr_sync),
	{}
};

/* Read Local Commands */
static int hci_read_local_cmds_sync(struct hci_dev *hdev)
{
	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS))
		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
					     0, NULL, HCI_CMD_TIMEOUT);

	return 0;
}

static int hci_init1_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	/* Reset */
	if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) {
		err = hci_reset_sync(hdev);
		if (err)
			return err;
	}

	return hci_init_stage_sync(hdev, br_init1);
}

/* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Class of Device */
static int hci_read_dev_class_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Local Name */
static int hci_read_local_name_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Voice Setting */
static int hci_read_voice_setting_sync(struct hci_dev *hdev)
{
	if (!read_voice_setting_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Number of Supported IAC */
static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Current IAC LAP */
static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
				     0, NULL, HCI_CMD_TIMEOUT);
}

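/* Program a BR/EDR event filter. For HCI_FLT_CLEAR_ALL only the filter
 * type is sent; otherwise the condition and auto-accept setting are
 * included as well.
 */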
static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3790
u8 cond_type, bdaddr_t *bdaddr,
3791
u8 auto_accept)
3792
{
3793
struct hci_cp_set_event_filter cp;
3794
3795
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3796
return 0;
3797
3798
if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
3799
return 0;
3800
3801
memset(&cp, 0, sizeof(cp));
3802
cp.flt_type = flt_type;
3803
3804
if (flt_type != HCI_FLT_CLEAR_ALL) {
3805
cp.cond_type = cond_type;
3806
bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3807
cp.addr_conn_flt.auto_accept = auto_accept;
3808
}
3809
3810
return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3811
flt_type == HCI_FLT_CLEAR_ALL ?
3812
sizeof(cp.flt_type) : sizeof(cp), &cp,
3813
HCI_CMD_TIMEOUT);
3814
}
3815
3816
static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3817
{
3818
if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3819
return 0;
3820
3821
/* In theory the state machine should not reach here unless
3822
* a hci_set_event_filter_sync() call succeeds, but we do
3823
* the check both for parity and as a future reminder.
3824
*/
3825
if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
3826
return 0;
3827
3828
return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3829
BDADDR_ANY, 0x00);
3830
}
3831
3832
/* Connection accept timeout ~20 secs */
3833
static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3834
{
3835
__le16 param = cpu_to_le16(0x7d00);
3836
3837
return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3838
sizeof(param), &param, HCI_CMD_TIMEOUT);
3839
}
3840
3841
/* Enable SCO flow control if supported */
3842
static int hci_write_sync_flowctl_sync(struct hci_dev *hdev)
3843
{
3844
struct hci_cp_write_sync_flowctl cp;
3845
int err;
3846
3847
/* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */
3848
if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) ||
3849
!hci_test_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED))
3850
return 0;
3851
3852
memset(&cp, 0, sizeof(cp));
3853
cp.enable = 0x01;
3854
3855
err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL,
3856
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3857
if (!err)
3858
hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL);
3859
3860
return err;
3861
}

/* BR Controller init stage 2 command sequence */
static const struct hci_init_stage br_init2[] = {
	/* HCI_OP_READ_BUFFER_SIZE */
	HCI_INIT(hci_read_buffer_size_sync),
	/* HCI_OP_READ_CLASS_OF_DEV */
	HCI_INIT(hci_read_dev_class_sync),
	/* HCI_OP_READ_LOCAL_NAME */
	HCI_INIT(hci_read_local_name_sync),
	/* HCI_OP_READ_VOICE_SETTING */
	HCI_INIT(hci_read_voice_setting_sync),
	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
	HCI_INIT(hci_read_num_supported_iac_sync),
	/* HCI_OP_READ_CURRENT_IAC_LAP */
	HCI_INIT(hci_read_current_iac_lap_sync),
	/* HCI_OP_SET_EVENT_FLT */
	HCI_INIT(hci_clear_event_filter_sync),
	/* HCI_OP_WRITE_CA_TIMEOUT */
	HCI_INIT(hci_write_ca_timeout_sync),
	/* HCI_OP_WRITE_SYNC_FLOWCTL */
	HCI_INIT(hci_write_sync_flowctl_sync),
	{}
};
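
/* How a stage table like br_init2 is consumed: a minimal sketch, on the
 * assumption that hci_init_stage_sync() (defined earlier in this file)
 * simply walks the table until the {} sentinel:
 *
 *	for (stage = br_init2; stage->func; stage++) {
 *		err = stage->func(hdev);
 *		if (err)
 *			return err;
 *	}
 *
 * i.e. the commands run strictly in table order and the first failing
 * entry aborts the whole stage.
 */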

static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
{
	u8 mode = 0x01;

	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	/* When SSP is available, the host features page should be
	 * available as well. However some controllers list the
	 * max_page as 0 as long as SSP has not been enabled. To
	 * achieve proper debugging output, force the minimum max_page
	 * to 1 at least.
	 */
	hdev->max_page = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

static int hci_write_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(&cp, 0, sizeof(cp));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
{
	u8 mode;

	if (!lmp_inq_rssi_capable(hdev) &&
	    !hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE))
		return 0;

	/* If Extended Inquiry Result events are supported, then
	 * they are clearly preferred over Inquiry Result with RSSI
	 * events.
	 */
	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
{
	if (!lmp_inq_tx_pwr_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
{
	struct hci_cp_read_local_ext_features cp;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.page = page;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
{
	return hci_read_local_ext_features_sync(hdev, 0x01);
}

/* HCI Controller init stage 2 command sequence */
static const struct hci_init_stage hci_init2[] = {
	/* HCI_OP_READ_LOCAL_COMMANDS */
	HCI_INIT(hci_read_local_cmds_sync),
	/* HCI_OP_WRITE_SSP_MODE */
	HCI_INIT(hci_write_ssp_mode_1_sync),
	/* HCI_OP_WRITE_EIR */
	HCI_INIT(hci_write_eir_sync),
	/* HCI_OP_WRITE_INQUIRY_MODE */
	HCI_INIT(hci_write_inquiry_mode_sync),
	/* HCI_OP_READ_INQ_RSP_TX_POWER */
	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_1_sync),
	/* HCI_OP_WRITE_AUTH_ENABLE */
	HCI_INIT(hci_write_auth_enable_sync),
	{}
};

/* Read LE Buffer Size */
static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
{
	/* Use Read LE Buffer Size V2 if supported */
	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
					     0, NULL, HCI_CMD_TIMEOUT);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}
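
/* A note on checks like "hdev->commands[41] & 0x20" above: hdev->commands
 * holds the Supported Commands bitmask returned by Read Local Supported
 * Commands, so what is tested here is bit 5 of octet 41, which should be
 * the bit the Core spec assigns to LE Read Buffer Size v2.
 */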

/* Read LE Local Supported Features */
static int hci_le_read_local_features_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Supported States */
static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 2 command sequence */
static const struct hci_init_stage le_init2[] = {
	/* HCI_OP_LE_READ_LOCAL_FEATURES */
	HCI_INIT(hci_le_read_local_features_sync),
	/* HCI_OP_LE_READ_BUFFER_SIZE */
	HCI_INIT(hci_le_read_buffer_size_sync),
	/* HCI_OP_LE_READ_SUPPORTED_STATES */
	HCI_INIT(hci_le_read_supported_states_sync),
	{}
};

static int hci_init2_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init2);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		err = hci_init_stage_sync(hdev, br_init2);
		if (err)
			return err;
	} else {
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
	}

	if (lmp_le_capable(hdev)) {
		err = hci_init_stage_sync(hdev, le_init2);
		if (err)
			return err;

		/* LE-only controllers have LE implicitly enabled */
		if (!lmp_bredr_capable(hdev))
			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	return 0;
}

static int hci_set_event_mask_sync(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */

		/* Don't set Disconnect Complete and mode change when
		 * suspended as that would wakeup the host when disconnecting
		 * due to suspend.
		 */
		if (hdev->suspended) {
			events[0] &= 0xef;
			events[2] &= 0xf7;
		}
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			/* Don't set Disconnect Complete when suspended as that
			 * would wakeup the host when disconnecting due to
			 * suspend.
			 */
			if (!hdev->suspended)
				events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}
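
/* A worked example for the event mask indexing above: event-mask bit n
 * corresponds to event code n + 1, and events[i] holds bits 8*i..8*i+7.
 * Disconnection Complete (event code 0x05) is therefore bit 4 of octet 0,
 * which is exactly what "events[0] |= 0x10" sets.
 */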

static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_read_stored_link_key cp;

	if (!(hdev->commands[6] & 0x20) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.read_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_setup_link_policy_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (!(hdev->commands[5] & 0x10))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[8] & 0x01))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[18] & 0x04) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
{
	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (!(hdev->commands[13] & 0x01) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read features beyond page 1 if available */
static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
{
	u8 page;
	int err;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
	     page++) {
		err = hci_read_local_ext_features_sync(hdev, page);
		if (err)
			return err;
	}

	return 0;
}

/* HCI Controller init stage 3 command sequence */
static const struct hci_init_stage hci_init3[] = {
	/* HCI_OP_SET_EVENT_MASK */
	HCI_INIT(hci_set_event_mask_sync),
	/* HCI_OP_READ_STORED_LINK_KEY */
	HCI_INIT(hci_read_stored_link_key_sync),
	/* HCI_OP_WRITE_DEF_LINK_POLICY */
	HCI_INIT(hci_setup_link_policy_sync),
	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
	HCI_INIT(hci_read_page_scan_activity_sync),
	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_read_def_err_data_reporting_sync),
	/* HCI_OP_READ_PAGE_SCAN_TYPE */
	HCI_INIT(hci_read_page_scan_type_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_all_sync),
	{}
};

static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
{
	u8 events[8];

	if (!lmp_le_capable(hdev))
		return 0;

	memset(events, 0, sizeof(events));

	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
		events[0] |= 0x10; /* LE Long Term Key Request */

	/* If controller supports the Connection Parameters Request
	 * Link Layer Procedure, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
		/* LE Remote Connection Parameter Request */
		events[0] |= 0x20;

	/* If the controller supports the Data Length Extension
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
		events[0] |= 0x40; /* LE Data Length Change */

	/* If the controller supports LL Privacy feature or LE Extended Adv,
	 * enable the corresponding event.
	 */
	if (use_enhanced_conn_complete(hdev))
		events[1] |= 0x02; /* LE Enhanced Connection Complete */

	/* Mark Device Privacy if Privacy Mode is supported */
	if (privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;

	/* Mark Address Resolution if LL Privacy is supported */
	if (ll_privacy_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION;

	/* If the controller supports Extended Scanner Filter
	 * Policies, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
		events[1] |= 0x04; /* LE Direct Advertising Report */

	/* If the controller supports Channel Selection Algorithm #2
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
		events[2] |= 0x08; /* LE Channel Selection Algorithm */

	/* If the controller supports the LE Set Scan Enable command,
	 * enable the corresponding advertising report event.
	 */
	if (hdev->commands[26] & 0x08)
		events[0] |= 0x02; /* LE Advertising Report */

	/* If the controller supports the LE Create Connection
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[26] & 0x10)
		events[0] |= 0x01; /* LE Connection Complete */

	/* If the controller supports the LE Connection Update
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x04)
		events[0] |= 0x04; /* LE Connection Update Complete */

	/* If the controller supports the LE Read Remote Used Features
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x20)
		/* LE Read Remote Used Features Complete */
		events[0] |= 0x08;

	/* If the controller supports the LE Read Local P-256
	 * Public Key command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x02)
		/* LE Read Local P-256 Public Key Complete */
		events[0] |= 0x80;

	/* If the controller supports the LE Generate DHKey
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x04)
		events[1] |= 0x01; /* LE Generate DHKey Complete */

	/* If the controller supports the LE Set Default PHY or
	 * LE Set PHY commands, enable the corresponding event.
	 */
	if (hdev->commands[35] & (0x20 | 0x40))
		events[1] |= 0x08; /* LE PHY Update Complete */

	/* If the controller supports LE Set Extended Scan Parameters
	 * and LE Set Extended Scan Enable commands, enable the
	 * corresponding event.
	 */
	if (use_ext_scan(hdev))
		events[1] |= 0x10; /* LE Extended Advertising Report */

	/* If the controller supports the LE Extended Advertising
	 * command, enable the corresponding event.
	 */
	if (ext_adv_capable(hdev))
		events[2] |= 0x02; /* LE Advertising Set Terminated */

	if (cis_capable(hdev)) {
		events[3] |= 0x01; /* LE CIS Established */
		if (cis_peripheral_capable(hdev))
			events[3] |= 0x02; /* LE CIS Request */
	}

	if (bis_capable(hdev)) {
		events[1] |= 0x20; /* LE PA Report */
		events[1] |= 0x40; /* LE PA Sync Established */
		events[3] |= 0x04; /* LE Create BIG Complete */
		events[3] |= 0x08; /* LE Terminate BIG Complete */
		events[3] |= 0x10; /* LE BIG Sync Established */
		events[3] |= 0x20; /* LE BIG Sync Loss */
		events[4] |= 0x02; /* LE BIG Info Advertising Report */
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}
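
/* The LE event mask follows the same indexing convention as the page 1
 * event mask: bit n enables the LE meta event with subevent code n + 1.
 * For example, LE Connection Complete has subevent code 0x01, so it is
 * bit 0 of octet 0, matching "events[0] |= 0x01" above.
 */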

/* Read LE Advertising Channel TX Power */
static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
{
	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
		/* The HCI specification forbids mixing legacy and
		 * extended advertising commands, and READ_ADV_TX_POWER
		 * belongs to the legacy set. So do not call it if
		 * extended advertising is supported, otherwise the
		 * controller will return COMMAND_DISALLOWED for the
		 * extended commands.
		 */
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_ADV_TX_POWER,
					     0, NULL, HCI_CMD_TIMEOUT);
	}

	return 0;
}

/* Read LE Min/Max Tx Power */
static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[38] & 0x80) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Accept List Size */
static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Clear LE Resolving List */
static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x20))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Set RPA timeout */
static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
{
	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);

	if (!(hdev->commands[35] & 0x04) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
				     sizeof(timeout), &timeout,
				     HCI_CMD_TIMEOUT);
}
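
/* For reference: the RPA_Timeout parameter sent above is expressed in
 * seconds (the Core spec defines a 0x0001-0x0E10 range, i.e. up to one
 * hour, with a default of 900 seconds), so hdev->rpa_timeout holds a
 * plain seconds value.
 */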

/* Read LE Maximum Data Length */
static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Suggested Default Data Length */
static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Number of Supported Advertising Sets */
static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
{
	if (!ext_adv_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev,
				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Write LE Host Supported */
static int hci_set_le_support_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le == lmp_host_le_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
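
/* Note on cp.simul above: the Simultaneous LE and BR/EDR to Same Device
 * Capable (Host) bit is always written as 0 here; that feature bit
 * appears to have been deprecated in later Core specifications, so only
 * the LE Supported (Host) bit is ever enabled.
 */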

/* LE Set Host Feature */
static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_host_feature cp;

	if (!iso_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	/* Connected Isochronous Channels (Host Support) */
	cp.bit_number = 32;
	cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 3 command sequence */
static const struct hci_init_stage le_init3[] = {
	/* HCI_OP_LE_SET_EVENT_MASK */
	HCI_INIT(hci_le_set_event_mask_sync),
	/* HCI_OP_LE_READ_ADV_TX_POWER */
	HCI_INIT(hci_le_read_adv_tx_power_sync),
	/* HCI_OP_LE_READ_TRANSMIT_POWER */
	HCI_INIT(hci_le_read_tx_power_sync),
	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
	HCI_INIT(hci_le_read_accept_list_size_sync),
	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
	HCI_INIT(hci_le_clear_accept_list_sync),
	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
	HCI_INIT(hci_le_read_resolv_list_size_sync),
	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
	HCI_INIT(hci_le_clear_resolv_list_sync),
	/* HCI_OP_LE_SET_RPA_TIMEOUT */
	HCI_INIT(hci_le_set_rpa_timeout_sync),
	/* HCI_OP_LE_READ_MAX_DATA_LEN */
	HCI_INIT(hci_le_read_max_data_len_sync),
	/* HCI_OP_LE_READ_DEF_DATA_LEN */
	HCI_INIT(hci_le_read_def_data_len_sync),
	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
	HCI_INIT(hci_set_le_support_sync),
	/* HCI_OP_LE_SET_HOST_FEATURE */
	HCI_INIT(hci_le_set_host_feature_sync),
	{}
};

static int hci_init3_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init3);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init3);

	return 0;
}

static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (!(hdev->commands[6] & 0x80) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
{
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (!(hdev->commands[22] & 0x04))
		return 0;

	/* If Connectionless Peripheral Broadcast central role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x08; /* Truncated Page Complete */
		events[2] |= 0x20; /* CPB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Peripheral Broadcast peripheral role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CPB Receive */
		events[2] |= 0x04; /* CPB Timeout */
		events[2] |= 0x10; /* Peripheral Page Response Timeout */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (!changed)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

/* Read local codec list if the HCI command is supported */
static int hci_read_local_codecs_sync(struct hci_dev *hdev)
{
	if (hdev->commands[45] & 0x04)
		hci_read_supported_codecs_v2(hdev);
	else if (hdev->commands[29] & 0x20)
		hci_read_supported_codecs(hdev);

	return 0;
}

/* Read local pairing options if the HCI command is supported */
static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[41] & 0x08))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Get MWS transport configuration if the HCI command is supported */
static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
{
	if (!mws_transport_config_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Check for Synchronization Train support */
static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
{
	if (!lmp_sync_train_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Enable Secure Connections if supported and configured */
static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
{
	u8 support = 0x01;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    !bredr_sc_enabled(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				     sizeof(support), &support,
				     HCI_CMD_TIMEOUT);
}

/* Set erroneous data reporting if supported to the wideband speech
 * setting value
 */
static int hci_set_err_data_report_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_err_data_reporting cp;
	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	if (!(hdev->commands[18] & 0x08) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
		return 0;

	if (enabled == hdev->err_data_reporting)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
					  ERR_DATA_REPORTING_DISABLED;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage hci_init4[] = {
	/* HCI_OP_DELETE_STORED_LINK_KEY */
	HCI_INIT(hci_delete_stored_link_key_sync),
	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
	HCI_INIT(hci_set_event_mask_page_2_sync),
	/* HCI_OP_READ_LOCAL_CODECS */
	HCI_INIT(hci_read_local_codecs_sync),
	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
	HCI_INIT(hci_read_local_pairing_opts_sync),
	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
	HCI_INIT(hci_get_mws_transport_config_sync),
	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
	HCI_INIT(hci_read_sync_train_params_sync),
	/* HCI_OP_WRITE_SC_SUPPORT */
	HCI_INIT(hci_write_sc_support_1_sync),
	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_set_err_data_report_sync),
	{}
};

/* Set Suggested Default Data Length to maximum if supported */
static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_write_def_data_len cp;

	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set Default PHY parameters if command is supported, enables all supported
 * PHYs according to the LE Features bits.
 */
static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_default_phy cp;

	if (!(hdev->commands[35] & 0x20)) {
		/* If the command is not supported it means only 1M PHY is
		 * supported.
		 */
		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.all_phys = 0x00;
	cp.tx_phys = HCI_LE_SET_PHY_1M;
	cp.rx_phys = HCI_LE_SET_PHY_1M;

	/* Enables 2M PHY if supported */
	if (le_2m_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_2M;
		cp.rx_phys |= HCI_LE_SET_PHY_2M;
	}

	/* Enables Coded PHY if supported */
	if (le_coded_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
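
/* Worked example for the PHY selection above: on a controller that is
 * both 2M and Coded capable, tx_phys and rx_phys each end up as the OR
 * of HCI_LE_SET_PHY_1M, HCI_LE_SET_PHY_2M and HCI_LE_SET_PHY_CODED,
 * while all_phys = 0x00 tells the controller that the host has a
 * preference for both the TX and RX directions.
 */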

static const struct hci_init_stage le_init4[] = {
	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
	HCI_INIT(hci_le_set_write_def_data_len_sync),
	/* HCI_OP_LE_SET_DEFAULT_PHY */
	HCI_INIT(hci_le_set_default_phy_sync),
	{}
};

static int hci_init4_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init4);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init4);

	return 0;
}

static int hci_init_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_init1_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = hci_init2_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init3_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init4_sync(hdev);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
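
/* To summarize the bring-up path above: hci_init_sync() runs the four
 * command stages strictly in order (init1 through init4, each with its
 * BR/EDR and LE sub-tables), bailing out on the first error, and only
 * then creates the debugfs entries for controllers that are still in
 * the setup or config phase.
 */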

#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }

static const struct {
	unsigned long quirk;
	const char *desc;
} hci_broken_table[] = {
	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
			 "HCI Read Local Supported Commands not supported"),
	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
			 "HCI Delete Stored Link Key command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
			 "HCI Read Default Erroneous Data Reporting command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
			 "HCI Read Transmit Power Level command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
			 "HCI Set Event Filter command not supported."),
	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
			 "HCI Enhanced Setup Synchronous Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
			 "HCI LE Set Random Private Address Timeout command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
			 "HCI LE Extended Create Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
			 "to unexpected SMP errors when pairing "
			 "and will not be used."),
	HCI_QUIRK_BROKEN(LE_CODED,
			 "HCI LE Coded PHY feature bit is set, "
			 "but its usage is not supported.")
};

/* This function handles the hdev setup stage:
 *
 * Calls hdev->setup
 * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
 */
static int hci_dev_setup_sync(struct hci_dev *hdev)
{
	int ret = 0;
	bool invalid_bdaddr;
	size_t i;

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP))
		return 0;

	bt_dev_dbg(hdev, "");

	hci_sock_dev_event(hdev, HCI_DEV_SETUP);

	if (hdev->setup)
		ret = hdev->setup(hdev);

	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
		if (hci_test_quirk(hdev, hci_broken_table[i].quirk))
			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
	}

	/* The transport driver can set the quirk to mark the
	 * BD_ADDR invalid before creating the HCI device or in
	 * its setup callback.
	 */
	invalid_bdaddr = hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
			 hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
	if (!ret) {
		if (hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY) &&
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
			hci_dev_get_bd_addr_from_property(hdev);

		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr) {
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
			if (!ret)
				invalid_bdaddr = false;
		}
	}

	/* The transport driver can set these quirks before
	 * creating the HCI device or in its setup callback.
	 *
	 * For the invalid BD_ADDR quirk it is possible that
	 * it becomes a valid address if the bootloader does
	 * provide it (see above).
	 *
	 * In case any of them is set, the controller has to
	 * start up as unconfigured.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) ||
	    invalid_bdaddr)
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* For an unconfigured controller it is required to
	 * read at least the version information provided by
	 * the Read Local Version Information command.
	 *
	 * If the set_bdaddr driver callback is provided, then
	 * also the original Bluetooth public device address
	 * will be read using the Read BD Address command.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return hci_unconf_init_sync(hdev);

	return ret;
}

/* This function handles the hdev init stage:
 *
 * Calls hci_dev_setup_sync to perform the setup stage
 * Calls hci_init_sync to perform the HCI command init sequence
 */
static int hci_dev_init_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	ret = hci_dev_setup_sync(hdev);

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = hci_init_sync(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		msft_do_open(hdev);
		aosp_do_open(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	return ret;
}

int hci_dev_open_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	hci_devcd_reset(hdev);

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	ret = hci_dev_init_sync(hdev);
	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT)) {
			ret = hci_powered_update_sync(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() can schedule new cmd_work, it
		 * should be flushed first to avoid an unexpected call
		 * of hci_cmd_work().
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			cancel_delayed_work_sync(&hdev->cmd_timer);
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		if (hdev->req_skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	return ret;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		hci_pend_le_list_del_init(p);
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_shutdown(struct hci_dev *hdev)
{
	int err = 0;

	/* Similar to how we first do setup and then set the exclusive access
	 * bit for userspace, we must first unset userchannel and then clean
	 * up. Otherwise, the kernel can't properly use the hci channel to
	 * clean up the controller (some shutdown routines require sending
	 * additional commands to the controller for example).
	 */
	bool was_userchannel =
		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			err = hdev->shutdown(hdev);
	}

	if (was_userchannel)
		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);

	return err;
}

int hci_dev_close_sync(struct hci_dev *hdev)
{
	bool auto_off;
	int err = 0;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		disable_delayed_work(&hdev->power_off);
		disable_delayed_work(&hdev->ncmd_timer);
		disable_delayed_work(&hdev->le_scan_disable);
	} else {
		cancel_delayed_work(&hdev->power_off);
		cancel_delayed_work(&hdev->ncmd_timer);
		cancel_delayed_work(&hdev->le_scan_disable);
	}

	hci_cmd_sync_cancel_sync(hdev, ENODEV);

	cancel_interleave_scan(hdev);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	err = hci_dev_shutdown(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		return err;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
	smp_unregister(hdev);
	hci_dev_unlock(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		aosp_do_close(hdev);
		msft_do_close(hdev);
	}

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		hci_reset_sync(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last request */
	if (hdev->req_skb) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);
	hci_codec_list_clear(&hdev->local_codecs);

	hci_dev_put(hdev);
	return err;
}

/* This function performs the power on HCI command sequence as follows:
 *
 * If the controller is already up (HCI_UP), perform the
 * hci_powered_update_sync sequence; otherwise run hci_dev_open_sync,
 * which follows up with hci_powered_update_sync once the init sequence
 * has completed.
 */
static int hci_power_on_sync(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		return hci_powered_update_sync(hdev);
	}

	err = hci_dev_open_sync(hdev);
	if (err < 0)
		return err;

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to return the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_close_sync(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}

	return 0;
}

static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_cp_remote_name_req_cancel cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_stop_discovery_sync(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
						    0, NULL, HCI_CMD_TIMEOUT);
			if (err)
				return err;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);

			err = hci_scan_disable_sync(hdev);
			if (err)
				return err;
		}

	} else {
		err = hci_scan_disable_sync(hdev);
		if (err)
			return err;
	}

	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return 0;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return 0;

		/* Ignore cancel errors, since they should not interfere
		 * with stopping the discovery.
		 */
		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
	}

	return 0;
}

static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_disconnect cp;

	if (conn->type == BIS_LINK || conn->type == PA_LINK) {
		/* This is a BIS connection, hci_conn_del will
		 * do the necessary cleanup.
		 */
		hci_dev_lock(hdev);
		hci_conn_failed(conn, reason);
		hci_dev_unlock(hdev);

		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
						sizeof(cp), &cp,
						HCI_EV_DISCONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
				      struct hci_conn *conn, u8 reason)
{
	/* Return the reason if still scanning, since the connection shall
	 * probably be cleaned up directly.
	 */
	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
		return reason;

	if (conn->role == HCI_ROLE_SLAVE ||
	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
				   u8 reason)
{
	if (conn->type == LE_LINK)
		return hci_le_connect_cancel_sync(hdev, conn, reason);

	if (conn->type == CIS_LINK) {
		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 1857:
		 *
		 * If this command is issued for a CIS on the Central and the
		 * CIS is successfully terminated before being established,
		 * then an HCI_LE_CIS_Established event shall also be sent for
		 * this CIS with the Status Operation Cancelled by Host (0x44).
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			return hci_disconnect_sync(hdev, conn, reason);

		/* CIS with no Create CIS sent have nothing to cancel */
		return HCI_ERROR_LOCAL_HOST_TERM;
	}

	if (conn->type == BIS_LINK || conn->type == PA_LINK) {
		/* There is no way to cancel a BIS without terminating the BIG
		 * which is done later on connection cleanup.
		 */
		return 0;
	}

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
						6, &conn->dst,
						HCI_EV_CONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
				     6, &conn->dst, HCI_CMD_TIMEOUT);
}

static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_reject_sync_conn_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	/* SCO rejection has its own limited set of
	 * allowed error values (0x0D-0x0F).
	 */
	if (reason < 0x0d || reason > 0x0f)
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
				  u8 reason)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
				u8 reason)
{
	struct hci_cp_reject_conn_req cp;

	if (conn->type == CIS_LINK)
		return hci_le_reject_cis_sync(hdev, conn, reason);

	if (conn->type == BIS_LINK || conn->type == PA_LINK)
		return -EINVAL;

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
		return hci_reject_sco_sync(hdev, conn, reason);

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
	int err = 0;
	u16 handle = conn->handle;
	bool disconnect = false;
	struct hci_conn *c;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		err = hci_disconnect_sync(hdev, conn, reason);
		break;
	case BT_CONNECT:
		err = hci_connect_cancel_sync(hdev, conn, reason);
		break;
	case BT_CONNECT2:
		err = hci_reject_conn_sync(hdev, conn, reason);
		break;
	case BT_OPEN:
	case BT_BOUND:
		break;
	default:
		disconnect = true;
		break;
	}

	hci_dev_lock(hdev);

	/* Check if the connection has been cleaned up concurrently */
	c = hci_conn_hash_lookup_handle(hdev, handle);
	if (!c || c != conn) {
		err = 0;
		goto unlock;
	}

	/* Cleanup the hci_conn object if it cannot be cancelled: that
	 * likely means the controller and host stack are out of sync,
	 * or in the LE case the connection was still scanning, so it
	 * can be cleaned up safely.
	 */
	if (disconnect) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, reason);
		hci_conn_del(conn);
	} else {
		hci_conn_failed(conn, reason);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
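
/* To summarize the state handling above: established links (BT_CONNECTED,
 * BT_CONFIG) are disconnected, outgoing attempts (BT_CONNECT) are
 * cancelled, incoming requests (BT_CONNECT2) are rejected, BT_OPEN and
 * BT_BOUND need no HCI traffic at all, and any other state falls through
 * to a direct local cleanup of the hci_conn object.
 */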

static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	rcu_read_lock();
	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
		/* Make sure the connection is not freed while unlocking */
		conn = hci_conn_get(conn);
		rcu_read_unlock();
		/* Disregard possible errors: hci_conn_del shall have been
		 * called even if errors occurred, since failures cause
		 * hci_conn_failed to be called, which calls hci_conn_del
		 * internally.
		 */
		hci_abort_conn_sync(hdev, conn, reason);
		hci_conn_put(conn);
		rcu_read_lock();
	}
	rcu_read_unlock();

	return 0;
}
5720
5721
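/* Note on the locking pattern above: hci_abort_conn_sync issues blocking
 * HCI commands and may sleep, which is not allowed inside an RCU
 * read-side critical section. Hence a reference is taken with
 * hci_conn_get and the RCU lock is dropped around the call, then
 * re-acquired to fetch the next (now first) entry.
 */
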
/* This function performs the power off HCI command sequence as follows:
 *
 * Clear Advertising
 * Stop Discovery
 * Disconnect all connections
 * hci_dev_close_sync
 */
static int hci_power_off_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is already down there is nothing to do */
	if (!test_bit(HCI_UP, &hdev->flags))
		return 0;

	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		err = hci_write_scan_enable_sync(hdev, 0x00);
		if (err)
			goto out;
	}

	err = hci_clear_adv_sync(hdev, NULL, false);
	if (err)
		goto out;

	err = hci_stop_discovery_sync(hdev);
	if (err)
		goto out;

	/* Terminated due to Power Off */
	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
	if (err)
		goto out;

	err = hci_dev_close_sync(hdev);

out:
	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
	return err;
}

int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
{
	if (val)
		return hci_power_on_sync(hdev);

	return hci_power_off_sync(hdev);
}

static int hci_write_iac_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
				     (cp.num_iac * 3) + 1, &cp,
				     HCI_CMD_TIMEOUT);
}

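/* For reference: the LAPs above are the standard inquiry access codes
 * stored LSB first, i.e. GIAC = 0x9E8B33 and LIAC = 0x9E8B00. The
 * command payload is num_iac (1 byte) followed by 3 bytes per LAP,
 * hence the (cp.num_iac * 3) + 1 length.
 */
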
int hci_update_discoverable_sync(struct hci_dev *hdev)
{
	int err = 0;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = hci_write_iac_sync(hdev);
		if (err)
			return err;

		err = hci_update_scan_sync(hdev);
		if (err)
			return err;

		err = hci_update_class_sync(hdev);
		if (err)
			return err;
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		err = hci_update_adv_data_sync(hdev, 0x00);
		if (err)
			return err;

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				err = hci_start_ext_adv_sync(hdev, 0x00);
			else
				err = hci_enable_advertising_sync(hdev);
		}
	}

	return err;
}

static int update_discoverable_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_discoverable_sync(hdev);
}

int hci_update_discoverable(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (hdev_is_powered(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
					  NULL);

	return 0;
}

int hci_update_connectable_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_update_scan_sync(hdev);
	if (err)
		return err;

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			err = hci_start_ext_adv_sync(hdev,
						     hdev->cur_adv_instance);
		else
			err = hci_enable_advertising_sync(hdev);

		if (err)
			return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
{
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	bt_dev_dbg(hdev, "");

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));

	if (hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;
	cp.num_rsp = num_rsp;

	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

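/* For reference: the Inquiry_Length parameter is expressed in units of
 * 1.28 s, so a length of 0x08 corresponds to roughly 10.24 s of inquiry,
 * and a num_rsp of 0 lets the controller report an unlimited number of
 * responses.
 */
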
static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
{
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the passive scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	cancel_interleave_scan(hdev);

	/* Pause address resolution for active scan and stop advertising if
	 * privacy is enabled.
	 */
	err = hci_pause_addr_resolution(hdev);
	if (err)
		goto failed;

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
					     &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev) ||
	    (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER) &&
	     hdev->discovery.result_filtering)) {
		/* Duplicate filter should be disabled when some advertisement
		 * monitor is activated, otherwise AdvMon can only receive one
		 * advertisement for one peer during active scanning, and
		 * might report loss to these peers.
		 *
		 * If the controller does strict duplicate filtering and the
		 * discovery requires result filtering, disable the
		 * controller-based filtering, since that can cause reports
		 * that would match the host filter to not be reported.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
				  hdev->le_scan_window_discovery,
				  own_addr_type, filter_policy, filter_dup);
	if (!err)
		return err;

failed:
	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* Resume passive scanning */
	hci_update_passive_scan_sync(hdev);
	return err;
}

static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
	if (err)
		return err;

	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}

int hci_start_discovery_sync(struct hci_dev *hdev)
{
	unsigned long timeout;
	int err;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			err = hci_start_interleaved_discovery_sync(hdev);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
	return 0;
}

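/* Note that only the LE-based discovery types arm the le_scan_disable
 * delayed work above; pure BR/EDR discovery returns early since the
 * inquiry is bounded by its own Inquiry_Length and completes on the
 * controller side.
 */
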
static void hci_suspend_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_suspend_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function disables discovery and marks it as paused */
static int hci_pause_discovery_sync(struct hci_dev *hdev)
{
	int old_state = hdev->discovery.state;
	int err;

	/* If discovery is already stopped/stopping/paused there is nothing
	 * to do.
	 */
	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
	    hdev->discovery_paused)
		return 0;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	err = hci_stop_discovery_sync(hdev);
	if (err)
		return err;

	hdev->discovery_paused = true;
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	return 0;
}

static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
		return 0;

	/* Always clear event filter when starting */
	hci_clear_event_filter_sync(hdev);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
			continue;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);

		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
						HCI_CONN_SETUP_ALLOW_BDADDR,
						&b->bdaddr,
						HCI_CONN_SETUP_AUTO_ON);
		if (err)
			bt_dev_err(hdev, "Failed to set event filter for %pMR",
				   &b->bdaddr);
		else
			scan = SCAN_PAGE;
	}

	if (scan && !scanning)
		hci_write_scan_enable_sync(hdev, scan);
	else if (!scan && scanning)
		hci_write_scan_enable_sync(hdev, scan);

	return 0;
}

/* This function disables scan (BR and LE) and marks it as paused */
static int hci_pause_scan_sync(struct hci_dev *hdev)
{
	if (hdev->scanning_paused)
		return 0;

	/* Disable page scan if enabled */
	if (test_bit(HCI_PSCAN, &hdev->flags))
		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);

	hci_scan_disable_sync(hdev);

	hdev->scanning_paused = true;

	return 0;
}

/* This function performs the HCI suspend procedures in the following order:
 *
 * Pause discovery (active scanning/inquiry)
 * Pause Directed Advertising/Advertising
 * Pause Scanning (passive scanning in case discovery was not active)
 * Disconnect all connections
 * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
 * otherwise:
 * Update event mask (only set events that are allowed to wake up the host)
 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
 * Update passive scanning (lower duty cycle)
 * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
 */
int hci_suspend_sync(struct hci_dev *hdev)
{
	int err;

	/* If marked as suspended there is nothing to do */
	if (hdev->suspended)
		return 0;

	/* Mark device as suspended */
	hdev->suspended = true;

	/* Pause discovery if not already stopped */
	hci_pause_discovery_sync(hdev);

	/* Pause other advertisements */
	hci_pause_advertising_sync(hdev);

	/* Suspend monitor filters */
	hci_suspend_monitor_sync(hdev);

	/* Prevent disconnects from causing scanning to be re-enabled */
	hci_pause_scan_sync(hdev);

	if (hci_conn_count(hdev)) {
		/* Soft disconnect everything (power off) */
		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
		if (err) {
			/* Set state to BT_RUNNING so resume doesn't notify */
			hdev->suspend_state = BT_RUNNING;
			hci_resume_sync(hdev);
			return err;
		}

		/* Update event mask so only the allowed event can wakeup the
		 * host.
		 */
		hci_set_event_mask_sync(hdev);
	}

	/* Only configure accept list if disconnect succeeded and wake
	 * isn't being prevented.
	 */
	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
		return 0;
	}

	/* Unpause to take care of updating scanning params */
	hdev->scanning_paused = false;

	/* Enable event filter for paired devices */
	hci_update_event_filter_sync(hdev);

	/* Update LE passive scan if enabled */
	hci_update_passive_scan_sync(hdev);

	/* Pause scan changes again. */
	hdev->scanning_paused = true;

	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;

	return 0;
}

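/* Note on the scanning_paused toggling above: the scan update helpers
 * short-circuit while scanning is marked as paused, so the flag is
 * temporarily cleared to let the event filter and passive scan
 * parameters actually reach the controller, then set again so later
 * disconnect events cannot re-enable scanning during suspend.
 */
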
/* This function resumes discovery */
static int hci_resume_discovery_sync(struct hci_dev *hdev)
{
	int err;

	/* If discovery is not paused there is nothing to do */
	if (!hdev->discovery_paused)
		return 0;

	hdev->discovery_paused = false;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	err = hci_start_discovery_sync(hdev);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				      DISCOVERY_FINDING);

	return err;
}

static void hci_resume_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_resume_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function resumes scan and resets the paused flag */
static int hci_resume_scan_sync(struct hci_dev *hdev)
{
	if (!hdev->scanning_paused)
		return 0;

	hdev->scanning_paused = false;

	hci_update_scan_sync(hdev);

	/* Reset passive scanning to normal */
	hci_update_passive_scan_sync(hdev);

	return 0;
}

/* This function performs the HCI resume procedures in the following order:
 *
 * Restore event mask
 * Clear event filter
 * Update passive scanning (normal duty cycle)
 * Resume Directed Advertising/Advertising
 * Resume discovery (active scanning/inquiry)
 */
int hci_resume_sync(struct hci_dev *hdev)
{
	/* If not marked as suspended there is nothing to do */
	if (!hdev->suspended)
		return 0;

	hdev->suspended = false;

	/* Restore event mask */
	hci_set_event_mask_sync(hdev);

	/* Clear any event filters and restore scan state */
	hci_clear_event_filter_sync(hdev);

	/* Resume scanning */
	hci_resume_scan_sync(hdev);

	/* Resume monitor filters */
	hci_resume_monitor_sync(hdev);

	/* Resume other advertisements */
	hci_resume_advertising_sync(hdev);

	/* Resume discovery */
	hci_resume_discovery_sync(hdev);

	return 0;
}

static bool conn_use_rpa(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
						struct hci_conn *conn)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_rp_le_set_ext_adv_params rp;
	int err;
	bdaddr_t random_addr;
	u8 own_addr_type;

	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		return err;

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
				     &own_addr_type, &random_addr);
	if (err)
		return err;

	memset(&cp, 0, sizeof(cp));

	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = HCI_TX_POWER_INVALID;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0x00; /* Use instance 0 for directed adv */
	cp.own_addr_type = own_addr_type;
	cp.peer_addr_type = conn->dst_type;
	bacpy(&cp.peer_addr, &conn->dst);

	/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does not
	 * support advertising data; when the advertising set already
	 * contains some, the controller shall return the error code
	 * 'Invalid HCI Command Parameters' (0x12).
	 * So it is required to remove the adv set for handle 0x00, since we
	 * use instance 0 for directed adv.
	 */
	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
	if (err)
		return err;

	err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
	if (err)
		return err;

	/* Update adv data as tx power is known now */
	err = hci_set_ext_adv_data_sync(hdev, cp.handle);
	if (err)
		return err;

	/* Check if the random address needs to be updated */
	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY) &&
	    bacmp(&random_addr, &hdev->random_addr)) {
		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
						       &random_addr);
		if (err)
			return err;
	}

	return hci_enable_ext_advertising_sync(hdev, 0x00);
}

static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	struct hci_cp_le_set_adv_param cp;
	u8 status;
	u8 own_addr_type;
	u8 enable;

	if (ext_adv_capable(hdev))
		return hci_le_ext_directed_advertising_sync(hdev, conn);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	/* Some controllers might reject command if intervals are not
	 * within range for undirected advertising.
	 * BCM20702A0 is known to be affected by this.
	 */
	cp.min_interval = cpu_to_le16(0x0020);
	cp.max_interval = cpu_to_le16(0x0020);

	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	enable = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

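/* For reference: advertising intervals are expressed in units of
 * 0.625 ms, so the hardcoded 0x0020 above corresponds to 20 ms for both
 * the minimum and maximum interval.
 */
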
static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}

static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
				       struct hci_conn *conn, u8 own_addr_type)
{
	struct hci_cp_le_ext_create_conn *cp;
	struct hci_cp_le_ext_conn_param *p;
	u8 data[sizeof(*cp) + sizeof(*p) * 3];
	u32 plen;

	cp = (void *)data;
	p = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	bacpy(&cp->peer_addr, &conn->dst);
	cp->peer_addr_type = conn->dst_type;
	cp->own_addr_type = own_addr_type;

	plen = sizeof(*cp);

	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
		cp->phys |= LE_SCAN_PHY_1M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
		cp->phys |= LE_SCAN_PHY_2M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
		cp->phys |= LE_SCAN_PHY_CODED;
		set_ext_conn_params(conn, p);

		plen += sizeof(*p);
	}

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
					plen, data,
					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

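/* Note on the layout above: LE Extended Create Connection takes one
 * hci_cp_le_ext_conn_param block per bit set in cp->phys, in PHY order
 * (1M, 2M, Coded), which is why data[] reserves room for three blocks
 * and plen grows as each PHY is selected.
 */
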
static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_create_conn cp;
	struct hci_conn_params *params;
	u8 own_addr_type;
	int err;
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	bt_dev_dbg(hdev, "conn %p", conn);

	clear_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->state = BT_CONNECT;

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning and simultaneous roles is not
		 * enabled simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
			hci_conn_del(conn);
			return -EBUSY;
		}

		/* Pause advertising while doing directed advertising. */
		hci_pause_advertising_sync(hdev);

		err = hci_le_directed_advertising_sync(hdev, conn);
		goto done;
	}

	/* Disable advertising if simultaneous roles is not in use. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		hci_pause_advertising_sync(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	/* Update random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		goto done;

	/* Send command LE Extended Create Connection if supported */
	if (use_ext_conn(hdev)) {
		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
		goto done;
	}

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
	 *
	 * If this event is unmasked and the HCI_LE_Connection_Complete event
	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
	 * sent when a new connection has been created.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
				       sizeof(cp), &cp,
				       use_enhanced_conn_complete(hdev) ?
				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
				       HCI_EV_LE_CONN_COMPLETE,
				       conn->conn_timeout, NULL);

done:
	if (err == -ETIMEDOUT)
		hci_le_connect_cancel_sync(hdev, conn, 0x00);

	/* Re-enable advertising after the connection attempt is finished. */
	hci_resume_advertising_sync(hdev);
	return err;
}

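/* For reference: conn_interval_min/max are in units of 1.25 ms, the
 * supervision timeout is in units of 10 ms and the CE lengths are in
 * units of 0.625 ms; a min/max CE length of 0x0000 effectively
 * expresses no preference and leaves the connection event length up to
 * the controller.
 */
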
int hci_le_create_cis_sync(struct hci_dev *hdev)
{
	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
	size_t aux_num_cis = 0;
	struct hci_conn *conn;
	u8 cig = BT_ISO_QOS_CIG_UNSET;

	/* The spec allows only one pending LE Create CIS command at a time. If
	 * the command is pending now, don't do anything. We check for pending
	 * connections after each CIS Established event.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2566:
	 *
	 * If the Host issues this command before all the
	 * HCI_LE_CIS_Established events from the previous use of the
	 * command have been generated, the Controller shall return the
	 * error code Command Disallowed (0x0C).
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2567:
	 *
	 * When the Controller receives the HCI_LE_Create_CIS command, the
	 * Controller sends the HCI_Command_Status event to the Host. An
	 * HCI_LE_CIS_Established event will be generated for each CIS when it
	 * is established or if it is disconnected or considered lost before
	 * being established; until all the events are generated, the command
	 * remains pending.
	 */

	hci_dev_lock(hdev);

	rcu_read_lock();

	/* Wait until previous Create CIS has completed */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			goto done;
	}

	/* Find CIG with all CIS ready */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_conn *link;

		if (hci_conn_check_create_cis(conn))
			continue;

		cig = conn->iso_qos.ucast.cig;

		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
			if (hci_conn_check_create_cis(link) > 0 &&
			    link->iso_qos.ucast.cig == cig &&
			    link->state != BT_CONNECTED) {
				cig = BT_ISO_QOS_CIG_UNSET;
				break;
			}
		}

		if (cig != BT_ISO_QOS_CIG_UNSET)
			break;
	}

	if (cig == BT_ISO_QOS_CIG_UNSET)
		goto done;

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd->cis[aux_num_cis];

		if (hci_conn_check_create_cis(conn) ||
		    conn->iso_qos.ucast.cig != cig)
			continue;

		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
		cis->acl_handle = cpu_to_le16(conn->parent->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		aux_num_cis++;

		if (aux_num_cis >= cmd->num_cis)
			break;
	}
	cmd->num_cis = aux_num_cis;

done:
	rcu_read_unlock();

	hci_dev_unlock(hdev);

	if (!aux_num_cis)
		return 0;

	/* Wait for HCI_LE_CIS_Established */
	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
					struct_size(cmd, cis, cmd->num_cis),
					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
					conn->conn_timeout, NULL);
}

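/* For reference: the DEFINE_FLEX bound of 0x1f matches the maximum
 * number of CIS entries a single LE Create CIS command may carry per
 * the specification, and struct_size() trims the payload to the
 * entries actually filled in.
 */
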
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy, use the current address */
	hci_copy_identity_address(hdev, rand_addr, own_addr_type);

	return 0;
}

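/* Note on the NRPA generation above: bdaddr_t stores the address LSB
 * first, so b[5] is the most significant byte; masking it with 0x3f
 * clears the top two bits, which is what marks a random address as
 * non-resolvable per the Core Specification.
 */
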
static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}

static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI
	 * Create Connection. This may cause the MGMT discovering state to
	 * become false without user space's request but it is okay since
	 * the MGMT Discovery APIs do not promise that discovery should be
	 * done forever. Instead, the user space monitors the status of MGMT
	 * discovering and it may request for discovery again when this flag
	 * becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
					    NULL, HCI_CMD_TIMEOUT);
		if (err)
			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
					sizeof(cp), &cp,
					HCI_EV_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
				       NULL);
}

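/* Note: ORing the cached clock offset with 0x8000 above sets the
 * Clock_Offset_Valid bit (bit 15) of the Create Connection command,
 * while pscan_rep_mode 0x02 (R2) is the conservative default used when
 * no fresh inquiry data is available for the peer.
 */
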
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto done;

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}

int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete);
}

int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (conn->state != BT_OPEN)
		return -EINVAL;

	switch (conn->type) {
	case ACL_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_acl_create_conn_sync,
						  conn, NULL);
	case LE_LINK:
		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
						  conn, create_le_conn_complete);
	}

	return -ENOENT;
}

int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
			    struct hci_conn_params *params)
{
	struct hci_cp_le_conn_update cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(params->conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(params->conn_max_interval);
	cp.conn_latency = cpu_to_le16(params->conn_latency);
	cp.supervision_timeout = cpu_to_le16(params->supervision_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	if (!err)
		goto unlock;

	/* Add connection to indicate PA sync error */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, bt_status(err));

unlock:
	hci_dev_unlock(hdev);
}

static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync cp;
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID)
		return -EINVAL;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	/* Stop scanning if the SID has not been set and active scanning is
	 * enabled, so that passive scanning is used instead, which scans
	 * using the allow list programmed to contain only the connection
	 * address.
	 */
	if (conn->sid == HCI_SID_INVALID &&
	    hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

	/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
	 * program the address in the allow list so PA advertisements can be
	 * received.
	 */
	set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	hci_update_passive_scan_sync(hdev);

	/* If the SID has not been set, listen for HCI_EV_LE_EXT_ADV_REPORT
	 * to update it.
	 */
	if (conn->sid == HCI_SID_INVALID) {
		err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
					       HCI_EV_LE_EXT_ADV_REPORT,
					       conn->conn_timeout, NULL);
		if (err == -ETIMEDOUT)
			goto done;
	}

	memset(&cp, 0, sizeof(cp));
	cp.options = qos->bcast.options;
	cp.sid = conn->sid;
	cp.addr_type = conn->dst_type;
	bacpy(&cp.addr, &conn->dst);
	cp.skip = cpu_to_le16(qos->bcast.skip);
	cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp.sync_cte_type = qos->bcast.sync_cte_type;

	/* The spec allows only one pending LE Periodic Advertising Create
	 * Sync command at a time so we forcefully wait for PA Sync Established
	 * event since cmd_work can only schedule one command at a time.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2493:
	 *
	 * If the Host issues this command when another HCI_LE_Periodic_
	 * Advertising_Create_Sync command is pending, the Controller shall
	 * return the error code Command Disallowed (0x0C).
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				       sizeof(cp), &cp,
				       HCI_EV_LE_PA_SYNC_ESTABLISHED,
				       conn->conn_timeout, NULL);
	if (err == -ETIMEDOUT)
		__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
				      0, NULL, HCI_CMD_TIMEOUT);

done:
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Update passive scan since HCI_PA_SYNC flag has been cleared */
	hci_update_passive_scan_sync(hdev);

	return err;
}

int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
				       create_pa_complete);
}

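/* Note on the HCI_OP_NOP wait above: the NOP opcode serves as a
 * placeholder so that the sync machinery blocks until an extended
 * advertising report (which carries the advertiser's SID) arrives or
 * the timeout expires, rather than sending a real command; only then is
 * PA Create Sync issued with a valid SID.
 */
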
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (hci_conn_valid(hdev, conn))
		clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
}

static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
{
	DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

	memset(cp, 0, sizeof(*cp));
	cp->handle = qos->bcast.big;
	cp->sync_handle = cpu_to_le16(conn->sync_handle);
	cp->encryption = qos->bcast.encryption;
	memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
	cp->mse = qos->bcast.mse;
	cp->timeout = cpu_to_le16(qos->bcast.timeout);
	cp->num_bis = conn->num_bis;
	memcpy(cp->bis, conn->bis, conn->num_bis);

	/* The spec allows only one pending LE BIG Create Sync command at
	 * a time, so we forcefully wait for BIG Sync Established event since
	 * cmd_work can only schedule one command at a time.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2586:
	 *
	 * If the Host sends this command when the Controller is in the
	 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
	 * Established event has not been generated, the Controller shall
	 * return the error code Command Disallowed (0x0C).
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
				       struct_size(cp, bis, cp->num_bis), cp,
				       HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
				       conn->conn_timeout, NULL);
	if (err == -ETIMEDOUT)
		hci_le_big_terminate_sync(hdev, cp->handle);

	return err;
}

int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
				       create_big_complete);
}

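/* The hci_connect_le_sync/hci_connect_pa_sync/hci_connect_big_sync
 * wrappers all follow the same pattern: hci_cmd_sync_queue_once queues
 * the blocking work function at most once per connection object, and
 * the paired *_complete callback performs cleanup or error signalling
 * once the work has run (or bails out with -ECANCELED if the work was
 * dequeued before running).
 */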