GitHub Repository: torvalds/linux
Path: blob/master/net/bluetooth/hci_event.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <[email protected]>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
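
/* Command Complete (hci_cc_*) handlers below all follow the same shape:
 * cast the pulled payload to the matching hci_rp_* reply struct, log the
 * status byte, bail out early on a non-zero status, and otherwise fold
 * the reply parameters back into struct hci_dev or the matching hci_conn
 * under hci_dev_lock(). Each handler returns the HCI status so the
 * caller can complete any pending request with it.
 */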
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before we receive the Inquiry Cancel Command Complete event, in
	 * which case the latter event should have a status of Command
	 * Disallowed. This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_remote_name_req_cancel *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}
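
/* Note: several of the write-command replies below carry only a status
 * byte, so the handlers recover the parameters they need from the
 * command that was last sent (via hci_sent_cmd_data()) rather than from
 * the reply itself.
 */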
static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}
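
/* The encryption key size read below is security sensitive: a failed
 * read, a key shorter than hdev->min_enc_key_size, or a key that has
 * been downgraded since it was stored alongside the link key all
 * surface a failure through hci_encrypt_cfm(), so the connection is
 * torn down rather than left running with weak encryption.
 */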
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		u8 *key_enc_size = hci_conn_key_enc_size(conn);

		conn->enc_key_size = rp->key_size;
		status = 0;

		/* Attempt to check if the key size is too small or if it has
		 * been downgraded from the last time it was stored as part of
		 * the link_key.
		 */
		if (conn->enc_key_size < hdev->min_enc_key_size ||
		    (key_enc_size && conn->enc_key_size < *key_enc_size)) {
			/* In the slave role, conn->state has already been set
			 * to BT_CONNECTED and the L2CAP connection request may
			 * not have been received yet; at this point the L2CAP
			 * layer does almost nothing with a non-zero status.
			 * So we also clear the encryption-related bits here,
			 * and the handler of the L2CAP connection request will
			 * then see the right security state later on.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}

		/* Update the key encryption size with the connection one */
		if (key_enc_size && *key_enc_size != conn->enc_key_size)
			*key_enc_size = conn->enc_key_size;
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by the device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (hci_test_quirk(hdev,
				   HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}
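
/* The buffer-size replies below also seed the flow-control counters:
 * acl_cnt/sco_cnt start out equal to the controller's packet counts and
 * are consumed as packets are queued, so a zero ACL MTU or packet count
 * is rejected with HCI_ERROR_INVALID_PARAMETERS.
 */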
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	if (!read_voice_setting_capable(hdev))
		hdev->sco_pkts = 0;

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	if (!hdev->acl_mtu || !hdev->acl_pkts)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
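
/* When the address handed to the controller matches the current RPA,
 * the two random-address handlers below also re-arm the corresponding
 * rpa_expired work so a fresh resolvable private address is generated
 * once hdev->rpa_timeout elapses.
 */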
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only for an adv instance, since handle 0x00 shall be using
	 * HCI_OP_LE_SET_RANDOM_ADDR, which covers both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as a peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
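
/* Extended advertising tracks enable state per advertising instance:
 * HCI_LE_ADV is only cleared once no instance remains enabled, and
 * set 0x00 (which has no adv_info) is tracked through the HCI_LE_ADV_0
 * flag instead.
 */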
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;
		else if (!set->handle)
			hci_dev_set_flag(hdev, HCI_LE_ADV_0);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;
			else if (!set->handle)
				hci_dev_clear_flag(hdev, HCI_LE_ADV_0);

			/* If just one instance was disabled, check whether any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
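
/* While LE scanning is active, the most recent advertising report can
 * be parked in the discovery state (a single-entry buffer) and is
 * flushed out through mgmt_device_found() when scanning gets disabled;
 * see le_set_scan_enable_complete() below.
 */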
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
			clear_pending_adv_report(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		}
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore,
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
2232
2233
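/* Command Status handlers (hci_cs_*): invoked when the controller
 * acknowledges a command with HCI_EV_CMD_STATUS. Most of them only have
 * work to do on failure, since success is confirmed by a later event.
 */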
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status)
		return;

	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
		set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  0, HCI_ROLE_MASTER);
			if (IS_ERR(conn))
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

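/* Returns 1 when an outgoing connection in BT_CONFIG still needs
 * authentication, i.e. when the caller should issue
 * HCI_OP_AUTH_REQUESTED before declaring the link usable.
 */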
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

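/* Pick the next inquiry cache entry that still needs its name resolved
 * and fire off a Remote Name Request for it, unless the overall name
 * resolution deadline has already passed.
 */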
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we already spent too much time resolving names. */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected, however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found among the devices whose names are
	 * pending, there is no need to continue resolving the next name;
	 * that will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful, wait for the name req complete event before
	 * checking whether authentication is needed.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
				       __u8 status)
{
	struct hci_conn *acl;
	struct hci_link *link;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
}

static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (!cp)
		return;

	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

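/* On success HCI_OP_DISCONNECT is normally followed by
 * HCI_EV_DISCONN_COMPLETE, so this handler only tears the connection
 * down directly on failure or while the controller is suspending.
 */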
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and not
	 * suspended; otherwise clean up the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	/* During suspend, mark connection as closed immediately
	 * since we might not receive HCI_EV_DISCONN_COMPLETE
	 */
	if (hdev->suspended)
		conn->state = BT_CLOSED;

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}

static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
{
	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	switch (type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_PUBLIC;
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_RANDOM;
	}

	if (resolved)
		*resolved = false;
	return type;
}

static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG)
		hci_connect_cfm(conn, status);

	hci_dev_unlock(hdev);
}

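/* A failed HCI_OP_LE_START_ENC cannot be retried mid-connection, so an
 * established link is dropped with an authentication failure.
 */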
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}

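/* Inquiry Complete: wake up anyone waiting on HCI_INQUIRY and, when mgmt
 * is in use, either continue with name resolution or mark discovery as
 * stopped.
 */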
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}

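/* Ask the controller for the encryption key size of a link; controllers
 * lacking the command are assumed to use the full HCI_LINK_KEY_SIZE.
 */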
static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct hci_cp_read_enc_key_size cp;
	u8 *key_enc_size = hci_conn_key_enc_size(conn);

	if (!read_key_size_capable(hdev)) {
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
		return -EOPNOTSUPP;
	}

	bt_dev_dbg(hdev, "hcon %p", conn);

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);

	/* If the key enc_size is already known, use it as conn->enc_key_size,
	 * otherwise use hdev->min_enc_key_size so the likes of
	 * l2cap_check_enc_key_size don't fail while waiting for
	 * HCI_OP_READ_ENC_KEY_SIZE response.
	 */
	if (key_enc_size && *key_enc_size)
		conn->enc_key_size = *key_enc_size;
	else
		conn->enc_key_size = hdev->min_enc_key_size;

	return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
}

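/* HCI_EV_CONN_COMPLETE handler. Note the guard below against processing
 * a duplicate event for a handle that is already set up, which could
 * otherwise corrupt connection state.
 */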
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	/* Check for existing connection:
	 *
	 * 1. If it doesn't exist then it must be receiver/slave role.
	 * 2. If it does exist, confirm that it is connecting/BT_CONNECT in
	 * case of initiator/master role since there could be a collision
	 * where either side is attempting to connect, or something like
	 * fuzz testing is trying to play tricks to destroy the hcon object
	 * before it even attempts to connect (e.g. hcon->state == BT_OPEN).
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn ||
	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
		/* In case of an error status with no connection pending,
		 * just unlock as there is nothing to clean up.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, 0,
						  HCI_ROLE_SLAVE);
			if (IS_ERR(conn)) {
				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* "Link key request" completed ahead of "connect request"
		 * completing
		 */
		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
		    ev->link_type == ACL_LINK) {
			struct link_key *key;

			key = hci_find_link_key(hdev, &ev->bdaddr);
			if (key) {
				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
				hci_read_enc_key_size(hdev, conn);
				hci_encrypt_cfm(conn, ev->status);
			}
		}

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}

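/* HCI_EV_CONN_REQUEST: decide whether to accept or reject an incoming
 * BR/EDR connection, including the CVE-2020-26555 defense of rejecting
 * peers that present our own BD_ADDR.
 */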
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, 0,
					  HCI_ROLE_SLAVE);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}

static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}

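/* HCI_EV_DISCONN_COMPLETE: tear down the connection, re-arm passive
 * scanning or advertising where configured, and report the mapped
 * disconnect reason to mgmt.
 */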
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-enable passive scanning if disconnected device is marked
	 * as auto-connectable.
	 */
	if (conn->type == LE_LINK) {
		params = hci_conn_params_lookup(hdev, &conn->dst,
						conn->dst_type);
		if (params) {
			switch (params->auto_connect) {
			case HCI_AUTO_CONN_LINK_LOSS:
				if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
					break;
				fallthrough;

			case HCI_AUTO_CONN_DIRECT:
			case HCI_AUTO_CONN_ALWAYS:
				hci_pend_le_list_del_init(params);
				hci_pend_le_list_add(params,
						     &hdev->pend_le_conns);
				hci_update_passive_scan(hdev);
				break;

			default:
				break;
			}
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

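/* HCI_EV_ENCRYPT_CHANGE: update the connection security flags, enforce
 * the link mode requirements, and trigger the encryption key size read
 * and authenticated payload timeout setup where applicable.
 */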
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		if (hci_read_enc_key_size(hdev, conn))
			goto notify;

		goto unlock;
	}

	/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
	 * to avoid unexpected SMP command errors when pairing.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
		goto notify;

	/* Set the default Authenticated Payload Timeout after an LE link is
	 * established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3,
	 * the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be sent when the
	 * link is active and encryption is enabled. The connection type can
	 * be either LE or ACL, the controller must support LMP Ping, and
	 * AES-CCM encryption is required as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp))
			bt_dev_err(hdev, "write auth payload timeout failed");
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

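/* Bookkeeping for the Num_HCI_Command_Packets count carried in command
 * complete/status events: while the controller reports no free command
 * slots, keep the ncmd watchdog armed instead of resuming submission.
 */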
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	rcu_read_lock();
	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				queue_delayed_work(hdev->workqueue,
						   &hdev->ncmd_timer,
						   HCI_NCMD_TIMEOUT);
		}
	}
	rcu_read_unlock();
}

static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size_v2 *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->le_pkts = rp->acl_max_pkt;
	hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
	hdev->iso_pkts = rp->iso_max_pkt;

	hdev->le_cnt = hdev->le_pkts;
	hdev->iso_cnt = hdev->iso_pkts;

	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);

	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
		return HCI_ERROR_INVALID_PARAMETERS;

	return rp->status;
}

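/* Fail every CIS in the given CIG that never received a connection
 * handle; used when Set CIG Parameters fails so that already-bound CIS
 * state is left untouched.
 */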
static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
{
	struct hci_conn *conn, *tmp;

	lockdep_assert_held(&hdev->lock);

	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
		if (conn->type != CIS_LINK ||
		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
			continue;

		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, status);
	}
}

static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);

		/* Notify device connected in case it is a BIG Sync */
		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
			mgmt_device_connected(hdev, conn, NULL, 0);

		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_read_all_local_features(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_all_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 248);

	return rp->status;
}

static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}

static void hci_cs_le_read_all_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_ALL_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn && conn->state == BT_CONFIG)
		hci_connect_cfm(conn, status);

	hci_dev_unlock(hdev);
}

static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_params *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
	if (!cp)
		return rp->status;

	/* TODO: set the conn state */
	return rp->status;
}

static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->periodic_enabled = true;
	} else {
		if (adv)
			adv->periodic_enabled = false;

		/* If just one instance was disabled, check whether any other
		 * instances are still enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

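/* Command Complete dispatch table: each entry binds an opcode to its
 * handler together with the minimum and maximum response lengths that
 * are accepted, so response length can be validated before the handler
 * runs.
 */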
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))

static const struct hci_cc {
	u16 op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;
	u16 max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel,
	       sizeof(struct hci_rp_remote_name_req_cancel)),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4207
sizeof(struct hci_rp_le_setup_iso_path)),
4208
HCI_CC(HCI_OP_LE_READ_ALL_LOCAL_FEATURES,
4209
hci_cc_le_read_all_local_features,
4210
sizeof(struct hci_rp_le_read_all_local_features)),
4211
};
4212
4213
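/* Dispatch a Command Complete response to its handler: the payload length
 * is checked against the table entry's min_len/max_len bounds before the
 * callback runs.
 */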
static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the cc, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}

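/* Handle HCI_EV_CMD_COMPLETE: look up the opcode in hci_cc_table and run
 * the matching handler; opcodes without a table entry (e.g. vendor
 * commands) fall back to treating byte 0 of the payload as the status.
 */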
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		if (!skb->len) {
			bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status",
				   *opcode);
			*status = HCI_ERROR_UNSPECIFIED;
			return;
		}

		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands sent by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

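/* Command Status handler for HCI_OP_LE_CREATE_CIS: on failure, close and
 * delete every connection referenced by the pending command and restart
 * any CIS creation that was still queued.
 */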
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	for (i = 0; i < cp->num_cis; i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}
	cp->num_cis = 0;

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}

#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

static const struct hci_cs {
	u16 op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
	HCI_CS(HCI_OP_LE_READ_ALL_REMOTE_FEATURES,
	       hci_cs_le_read_all_remote_features),
};

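/* Handle HCI_EV_CMD_STATUS: dispatch to the matching entry in hci_cs_table
 * and complete the pending request when the command failed or when no
 * further event is expected for it.
 */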
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of command there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}

static void hci_role_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

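/* Handle HCI_EV_NUM_COMP_PKTS: return the reported completions to the
 * per-link-type flow control counters and kick the TX work so queued
 * packets can use the freed slots.
 */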
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;
		unsigned int i;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		/* Check if there really are enough packets outstanding before
		 * attempting to decrease the sent counter, otherwise it could
		 * underflow.
		 */
		if (conn->sent >= count) {
			conn->sent -= count;
		} else {
			bt_dev_warn(hdev, "hcon %p sent %u < count %u",
				    conn, conn->sent, count);
			conn->sent = 0;
		}

		for (i = 0; i < count; ++i)
			hci_conn_tx_dequeue(conn);

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
		case ESCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;

			break;

		case CIS_LINK:
		case BIS_LINK:
		case PA_LINK:
			hdev->iso_cnt += count;
			if (hdev->iso_cnt > hdev->iso_pkts)
				hdev->iso_cnt = hdev->iso_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);

	hci_dev_unlock(hdev);
}

static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

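/* Cache the link key type and PIN length on the connection and derive
 * the pending security level from the key type.
 */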
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}

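/* Respond to HCI_EV_LINK_KEY_REQ: reply with a stored link key if one
 * exists and is strong enough for the pending security level, otherwise
 * send a negative reply.
 */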
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

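/* Handle HCI_EV_LINK_KEY_NOTIFY: drop all-zero keys (CVE-2020-26555),
 * store the new key and mark it for flushing unless it is persistent.
 */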
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = data;
	struct inquiry_entry *ie;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else {
		mgmt_device_connected(hdev, conn, NULL, 0);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state = BT_CONNECTED;
		conn->type = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1e: /* Invalid LMP Parameters */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path, which is
	 * zero; a non-zero value means a non-HCI transport data path.
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

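/* Return the number of significant bytes in an EIR blob, i.e. the offset
 * of the first zero-length field, or eir_len if every byte is used.
 */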
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}

static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

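/* Compute the authentication requirements for an IO Capability reply,
 * honouring a remote no-bonding request and asking for MITM protection
 * only when both sides have usable IO capabilities.
 */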
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

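/* Report the OOB Data Present value for the IO Capability reply (0x00 no
 * data, 0x01 P-192, 0x02 P-256), based on Secure Connections support and
 * the values actually stored for the peer.
 */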
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check whether
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}

static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		goto unlock;

	/* Assume remote supports SSP since it has triggered this event */
	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're bondable, if we're the initiators of the
	 * pairing, or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, use the JUST_CFM method */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiator of the authorization request
		 * and the local IO capability is not NoInputNoOutput,
		 * use the JUST_WORKS method (mgmt_user_confirm with
		 * confirm_hint set to 1).
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If a link key already exists in the local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = data;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event always gets produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

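/* Record the initiator and responder addresses of a new LE connection,
 * preferring a controller-provided Local RPA over hdev->rpa when one is
 * set.
 */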
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}

static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;
	int err;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Check for existing connection:
	 *
	 * 1. If it doesn't exist then use the role to create a new object.
	 * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
	 * of initiator/master role since there could be a collision where
	 * either side is attempting to connect or something like fuzz
	 * testing is trying to play tricks to destroy the hcon object before
	 * it even attempts to connect (e.g. hcon->state == BT_OPEN).
	 */
	conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
	if (!conn ||
	    (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to clean up.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, bdaddr_type,
					  role);
		if (IS_ERR(conn)) {
			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
			goto unlock;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	err = hci_le_read_remote_features(conn);
	if (err) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}

static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     NULL, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}

static void hci_le_pa_sync_lost_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_lost *ev = data;
	u16 handle = le16_to_cpu(ev->handle);
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "sync handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	/* Delete the pa sync connection */
	conn = hci_conn_hash_lookup_pa_sync_handle(hdev, handle);
	if (conn) {
		clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
		clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
		hci_disconn_cfm(conn, HCI_ERROR_REMOTE_USER_TERM);
		hci_conn_del(conn);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}

static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
5962
{
5963
struct hci_cp_le_pa_term_sync cp;
5964
5965
memset(&cp, 0, sizeof(cp));
5966
cp.handle = handle;
5967
5968
return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
5969
}

static void hci_le_past_received_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_le_past_received *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync, *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for dst %pMR sid 0x%2.2x",
			   &ev->bdaddr, ev->sid);
		goto unlock;
	}

	conn->sync_handle = le16_to_cpu(ev->sync_handle);
	conn->sid = HCI_SID_INVALID;

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Add connection to indicate PA sync event */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);

	if (ev->status) {
		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type, u8 phy, u8 sec_phy)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
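	/* le_states[3] & 0x10 below is bit 28 of the supported LE states
	 * mask which, as we read the Core spec, indicates support for
	 * initiating a connection while connected in the peripheral role.
	 */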
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This allows
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral devices
			 * are accepted and also outgoing connections to
			 * peripheral devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER, phy, sec_phy);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by the higher layer that tried to connect; if not, then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at a time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}

static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
			       u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0, so there is no need for an extra ptr
	 * check as 'ptr < data + 0' is already false in that case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when the remote
	 * device is advertising with an incorrect data length.
	 */
	len = ptr - data;
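
	/* Worked example (hypothetical report): data = { 0x02, 0x01, 0x06,
	 * 0x03, 0x03, 0x0d, 0x18, 0x00, 0x00 } with a claimed len of 9 stops
	 * at the first zero length octet, so len is trimmed to 7 and only
	 * the two valid AD structures survive.
	 */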

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 *
	 * If local privacy is not enabled, the controller shall not be
	 * generating such an event since, according to its documentation, it
	 * is only valid for filter_policy 0x02 and 0x03, but the fact that it
	 * did generate an LE Direct Advertising Report means it is probably
	 * broken and won't generate any other event which could potentially
	 * break auto-connect logic. So in case local privacy is not enabled,
	 * this ignores the direct_addr so it works as a regular report.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr &&
	    hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kinds of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type, phy, sec_phy);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;
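
		/* Each report is followed by a single RSSI byte, hence the
		 * info->length + 1 pull below.
		 */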
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= max_adv_len(hdev)) {
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   HCI_ADV_PHY_1M, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}

static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;

	if (!pdu_type)
		return LE_ADV_NONCONN_IND;

	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}
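
	/* For extended (non-legacy) PDUs, derive the closest legacy type
	 * from the individual property bits; e.g. a hypothetical evt_type
	 * with both LE_EXT_ADV_CONN_IND and LE_EXT_ADV_DIRECT_IND set maps
	 * to LE_ADV_DIRECT_IND below.
	 */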
	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);

		if (hci_test_quirk(hdev,
				   HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
			info->primary_phy &= 0x1f;
			info->secondary_phy &= 0x1f;
		}

		/* Check if PA Sync is pending, and if the hci_conn SID has
		 * not been set, update it.
		 */
		if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
			struct hci_conn *conn;

			conn = hci_conn_hash_lookup_create_pa_sync(hdev);
			if (conn && conn->sid == HCI_SID_INVALID)
				conn->sid = info->sid;
		}

		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->primary_phy,
					   info->secondary_phy,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync, *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	conn = hci_conn_hash_lookup_create_pa_sync(hdev);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for dst %pMR sid 0x%2.2x",
			   &ev->bdaddr, ev->sid);
		goto unlock;
	}

	clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	conn->sync_handle = le16_to_cpu(ev->handle);
	conn->sid = HCI_SID_INVALID;

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Add connection to indicate PA sync event */
	pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY, 0,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->handle);

	if (ev->status) {
		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_per_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
						      le16_to_cpu(ev->sync_handle));
	if (!pa_sync)
		goto unlock;

	if (ev->data_status == LE_PA_DATA_COMPLETE &&
	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
		/* Notify iso layer */
		hci_connect_cfm(pa_sync, 0);

		/* Notify MGMT layer */
		mgmt_device_connected(hdev, pa_sync, NULL, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);
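
	/* Note on units: the interval values are in 1.25 ms steps and the
	 * timeout is in 10 ms steps, so e.g. min = 24 with timeout = 400
	 * requests a 30 ms interval with a 4 s supervision timeout.
	 */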
	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_direct_adv_report *ev = data;
	u64 instant = jiffies;
	int i;

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
				flex_array_size(ev, info, ev->num)))
		return;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_ev_le_direct_adv_info *info = &ev->info[i];

		process_adv_report(hdev, info->type, &info->bdaddr,
				   info->bdaddr_type, &info->direct_addr,
				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
				   info->rssi, NULL, 0, false, false, instant);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);
	u32 c_sdu_interval, p_sdu_interval;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != CIS_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
	 * page 3075:
	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) x
	 * ISO_Interval + SDU_Interval_C_To_P
	 * ...
	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
	 * Transport_Latency
	 */
	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
			  (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
			 get_unaligned_le24(ev->c_latency);
	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
			  (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
			 get_unaligned_le24(ev->p_latency);
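
	/* Illustrative numbers (not from the spec): with CIG_Sync_Delay of
	 * 5000 us, FT of 2, an ISO_Interval of 8 (8 * 1250 = 10000 us) and
	 * a Transport_Latency of 20000 us, the rearranged formula gives
	 * SDU_Interval = (5000 + 2 * 10000) - 20000 = 5000 us.
	 */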

	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		qos->ucast.in.interval = c_sdu_interval;
		qos->ucast.out.interval = p_sdu_interval;
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
		qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		qos->ucast.in.interval = p_sdu_interval;
		qos->ucast.out.interval = c_sdu_interval;
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
		qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}

static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
}

static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_accept_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
}

static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, CIS_LINK, &acl->dst, acl->dst_type,
				   HCI_ROLE_SLAVE, cis_handle);
		if (IS_ERR(cis)) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}

static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_UINT(data);

	return hci_le_terminate_big_sync(hdev, handle,
					 HCI_ERROR_LOCAL_HOST_TERM);
}

static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	/* Connect all BISes that are bound to the BIG */
	while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						      BT_BOUND,
						      HCI_ROLE_MASTER))) {
		if (ev->status) {
			hci_connect_cfm(conn, ev->status);
			hci_conn_del(conn);
			continue;
		}

		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		conn->state = BT_CONNECTED;
		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
	}

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_established *ev = data;
	struct hci_conn *bis, *conn;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_big_sync_pend(hdev, ev->handle,
						  ev->num_bis);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection for big 0x%2.2x",
			   ev->handle);
		goto unlock;
	}

	clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

	conn->num_bis = 0;
	memset(conn->bis, 0, sizeof(conn->bis));

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			if (handle > HCI_CONN_HANDLE_MAX) {
				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
				continue;
			}
			bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY, 0,
					   HCI_ROLE_SLAVE, handle);
			if (IS_ERR(bis))
				continue;
		}

		if (ev->status != 0x42)
			/* Mark PA sync as established */
			set_bit(HCI_CONN_PA_SYNC, &bis->flags);

		bis->sync_handle = conn->sync_handle;
		bis->iso_qos.bcast.big = ev->handle;
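		/* ev->latency is a 24-bit little-endian value, so zero the
		 * __le32 first and copy only its three bytes before the
		 * conversion below.
		 */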
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
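		/* e.g. an ISO_Interval of 24 slots is 24 * 125 / 100 = 30 ms */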
		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);

		if (!ev->status) {
			bis->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
			hci_debugfs_create_conn(bis);
			hci_conn_add_sysfs(bis);
			hci_iso_setup_path(bis);
		}
	}

	/* In case BIG sync failed, notify each failed connection to
	 * the user after all hci connections have been added
	 */
	if (ev->status)
		for (i = 0; i < ev->num_bis; i++) {
			u16 handle = le16_to_cpu(ev->bis[i]);

			bis = hci_conn_hash_lookup_handle(hdev, handle);
			if (!bis)
				continue;

			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
			hci_connect_cfm(bis, ev->status);
		}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_lost *ev = data;
	struct hci_conn *bis;
	bool mgmt_conn = false;

	bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);

	hci_dev_lock(hdev);

	/* Delete each bis connection */
	while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
						     BT_CONNECTED,
						     HCI_ROLE_SLAVE))) {
		if (!mgmt_conn) {
			mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED,
						       &bis->flags);
			mgmt_device_disconnected(hdev, &bis->dst, bis->type,
						 bis->dst_type, ev->reason,
						 mgmt_conn);
		}

		clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
		hci_disconn_cfm(bis, ev->reason);
		hci_conn_del(bis);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		goto unlock;

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
						      le16_to_cpu(ev->sync_handle));
	if (!pa_sync)
		goto unlock;

	pa_sync->iso_qos.bcast.encryption = ev->encryption;

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_read_all_remote_features_evt(struct hci_dev *hdev,
						void *data, struct sk_buff *skb)
{
	struct hci_evt_le_read_all_remote_features_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->le_features, ev->features, 248);

	if (conn->state == BT_CONFIG) {
		__u8 status;

		/* If the local controller supports peripheral-initiated
		 * features exchange, but the remote controller does
		 * not, then it is possible that the error code 0x1a
		 * for unsupported remote feature gets returned.
		 *
		 * In this specific case, allow the connection to
		 * transition into connected state and mark it as
		 * successful.
		 */
		if (!conn->out &&
		    ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
		    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
			status = 0x00;
		else
			status = ev->status;

		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
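
/* As an example of how the macros expand: HCI_LE_EV(HCI_EV_LE_LTK_REQ,
 * hci_le_ltk_request_evt, sizeof(struct hci_ev_le_ltk_req)) produces a
 * designated initializer at index [HCI_EV_LE_LTK_REQ] with min_len equal
 * to max_len, so shorter packets are rejected outright while longer ones
 * only trigger a warning in hci_le_meta_evt().
 */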

/* Entries in this table shall have their position according to the subevent
 * opcode they handle so the use of the macros above is recommended, since it
 * initializes each entry at its proper index using designated initializers;
 * that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_established_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
		     hci_le_per_adv_report_evt,
		     sizeof(struct hci_ev_le_per_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x10 = HCI_EV_LE_PA_SYNC_LOST] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_LOST, hci_le_pa_sync_lost_evt,
		  sizeof(struct hci_ev_le_pa_sync_lost)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x18 = HCI_EV_LE_PAST_RECEIVED] */
	HCI_LE_EV(HCI_EV_LE_PAST_RECEIVED,
		  hci_le_past_received_evt,
		  sizeof(struct hci_ev_le_past_received)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABLISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_established),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
		     hci_le_big_sync_lost_evt,
		     sizeof(struct hci_evt_le_big_sync_lost),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x2b = HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE,
		     hci_le_read_all_remote_features_evt,
		     sizeof(struct
			    hci_evt_le_read_all_remote_features_complete),
		     HCI_MAX_EVENT_SIZE),
};

static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->req_skb &&
	    (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 ||
	     hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len; it may still be possible
	 * to partially parse the event, so leave it to the callback to decide
	 * whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table shall have their position according to the event
 * opcode they handle so the use of the macros above is recommended, since it
 * initializes each entry at its proper index using designated initializers;
 * that way events without a callback function need no entries.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;
	u16  max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len; it may still be possible
	 * to partially parse the event, so leave it to the callback to decide
	 * whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	hci_dev_lock(hdev);
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
	hci_dev_unlock(hdev);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}