Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/bluetooth/hci_event.c
15109 views
1
/*
2
BlueZ - Bluetooth protocol stack for Linux
3
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5
Written 2000,2001 by Maxim Krasnyansky <[email protected]>
6
7
This program is free software; you can redistribute it and/or modify
8
it under the terms of the GNU General Public License version 2 as
9
published by the Free Software Foundation;
10
11
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22
SOFTWARE IS DISCLAIMED.
23
*/
24
25
/* Bluetooth HCI event handling. */
26
27
#include <linux/module.h>
28
29
#include <linux/types.h>
30
#include <linux/errno.h>
31
#include <linux/kernel.h>
32
#include <linux/slab.h>
33
#include <linux/poll.h>
34
#include <linux/fcntl.h>
35
#include <linux/init.h>
36
#include <linux/skbuff.h>
37
#include <linux/interrupt.h>
38
#include <linux/notifier.h>
39
#include <net/sock.h>
40
41
#include <asm/system.h>
42
#include <linux/uaccess.h>
43
#include <asm/unaligned.h>
44
45
#include <net/bluetooth/bluetooth.h>
46
#include <net/bluetooth/hci_core.h>
47
48
/* Handle HCI Event packets */
49
50
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
51
{
52
__u8 status = *((__u8 *) skb->data);
53
54
BT_DBG("%s status 0x%x", hdev->name, status);
55
56
if (status)
57
return;
58
59
if (test_bit(HCI_MGMT, &hdev->flags) &&
60
test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
61
mgmt_discovering(hdev->id, 0);
62
63
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
64
65
hci_conn_check_pending(hdev);
66
}
67
68
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
69
{
70
__u8 status = *((__u8 *) skb->data);
71
72
BT_DBG("%s status 0x%x", hdev->name, status);
73
74
if (status)
75
return;
76
77
if (test_bit(HCI_MGMT, &hdev->flags) &&
78
test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
79
mgmt_discovering(hdev->id, 0);
80
81
hci_conn_check_pending(hdev);
82
}
83
84
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
85
{
86
BT_DBG("%s", hdev->name);
87
}
88
89
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
90
{
91
struct hci_rp_role_discovery *rp = (void *) skb->data;
92
struct hci_conn *conn;
93
94
BT_DBG("%s status 0x%x", hdev->name, rp->status);
95
96
if (rp->status)
97
return;
98
99
hci_dev_lock(hdev);
100
101
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102
if (conn) {
103
if (rp->role)
104
conn->link_mode &= ~HCI_LM_MASTER;
105
else
106
conn->link_mode |= HCI_LM_MASTER;
107
}
108
109
hci_dev_unlock(hdev);
110
}
111
112
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113
{
114
struct hci_rp_read_link_policy *rp = (void *) skb->data;
115
struct hci_conn *conn;
116
117
BT_DBG("%s status 0x%x", hdev->name, rp->status);
118
119
if (rp->status)
120
return;
121
122
hci_dev_lock(hdev);
123
124
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125
if (conn)
126
conn->link_policy = __le16_to_cpu(rp->policy);
127
128
hci_dev_unlock(hdev);
129
}
130
131
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
132
{
133
struct hci_rp_write_link_policy *rp = (void *) skb->data;
134
struct hci_conn *conn;
135
void *sent;
136
137
BT_DBG("%s status 0x%x", hdev->name, rp->status);
138
139
if (rp->status)
140
return;
141
142
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
143
if (!sent)
144
return;
145
146
hci_dev_lock(hdev);
147
148
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149
if (conn)
150
conn->link_policy = get_unaligned_le16(sent + 2);
151
152
hci_dev_unlock(hdev);
153
}
154
155
static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156
{
157
struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158
159
BT_DBG("%s status 0x%x", hdev->name, rp->status);
160
161
if (rp->status)
162
return;
163
164
hdev->link_policy = __le16_to_cpu(rp->policy);
165
}
166
167
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
168
{
169
__u8 status = *((__u8 *) skb->data);
170
void *sent;
171
172
BT_DBG("%s status 0x%x", hdev->name, status);
173
174
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
175
if (!sent)
176
return;
177
178
if (!status)
179
hdev->link_policy = get_unaligned_le16(sent);
180
181
hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
182
}
183
184
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185
{
186
__u8 status = *((__u8 *) skb->data);
187
188
BT_DBG("%s status 0x%x", hdev->name, status);
189
190
clear_bit(HCI_RESET, &hdev->flags);
191
192
hci_req_complete(hdev, HCI_OP_RESET, status);
193
}
194
195
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
196
{
197
__u8 status = *((__u8 *) skb->data);
198
void *sent;
199
200
BT_DBG("%s status 0x%x", hdev->name, status);
201
202
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
203
if (!sent)
204
return;
205
206
if (test_bit(HCI_MGMT, &hdev->flags))
207
mgmt_set_local_name_complete(hdev->id, sent, status);
208
209
if (status)
210
return;
211
212
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
213
}
214
215
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
216
{
217
struct hci_rp_read_local_name *rp = (void *) skb->data;
218
219
BT_DBG("%s status 0x%x", hdev->name, rp->status);
220
221
if (rp->status)
222
return;
223
224
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
225
}
226
227
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
228
{
229
__u8 status = *((__u8 *) skb->data);
230
void *sent;
231
232
BT_DBG("%s status 0x%x", hdev->name, status);
233
234
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
235
if (!sent)
236
return;
237
238
if (!status) {
239
__u8 param = *((__u8 *) sent);
240
241
if (param == AUTH_ENABLED)
242
set_bit(HCI_AUTH, &hdev->flags);
243
else
244
clear_bit(HCI_AUTH, &hdev->flags);
245
}
246
247
hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
248
}
249
250
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
251
{
252
__u8 status = *((__u8 *) skb->data);
253
void *sent;
254
255
BT_DBG("%s status 0x%x", hdev->name, status);
256
257
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
258
if (!sent)
259
return;
260
261
if (!status) {
262
__u8 param = *((__u8 *) sent);
263
264
if (param)
265
set_bit(HCI_ENCRYPT, &hdev->flags);
266
else
267
clear_bit(HCI_ENCRYPT, &hdev->flags);
268
}
269
270
hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
271
}
272
273
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
274
{
275
__u8 status = *((__u8 *) skb->data);
276
void *sent;
277
278
BT_DBG("%s status 0x%x", hdev->name, status);
279
280
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
281
if (!sent)
282
return;
283
284
if (!status) {
285
__u8 param = *((__u8 *) sent);
286
int old_pscan, old_iscan;
287
288
old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
289
old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
290
291
if (param & SCAN_INQUIRY) {
292
set_bit(HCI_ISCAN, &hdev->flags);
293
if (!old_iscan)
294
mgmt_discoverable(hdev->id, 1);
295
} else if (old_iscan)
296
mgmt_discoverable(hdev->id, 0);
297
298
if (param & SCAN_PAGE) {
299
set_bit(HCI_PSCAN, &hdev->flags);
300
if (!old_pscan)
301
mgmt_connectable(hdev->id, 1);
302
} else if (old_pscan)
303
mgmt_connectable(hdev->id, 0);
304
}
305
306
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
307
}
308
309
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
310
{
311
struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
312
313
BT_DBG("%s status 0x%x", hdev->name, rp->status);
314
315
if (rp->status)
316
return;
317
318
memcpy(hdev->dev_class, rp->dev_class, 3);
319
320
BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
321
hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
322
}
323
324
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
325
{
326
__u8 status = *((__u8 *) skb->data);
327
void *sent;
328
329
BT_DBG("%s status 0x%x", hdev->name, status);
330
331
if (status)
332
return;
333
334
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
335
if (!sent)
336
return;
337
338
memcpy(hdev->dev_class, sent, 3);
339
}
340
341
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
342
{
343
struct hci_rp_read_voice_setting *rp = (void *) skb->data;
344
__u16 setting;
345
346
BT_DBG("%s status 0x%x", hdev->name, rp->status);
347
348
if (rp->status)
349
return;
350
351
setting = __le16_to_cpu(rp->voice_setting);
352
353
if (hdev->voice_setting == setting)
354
return;
355
356
hdev->voice_setting = setting;
357
358
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
359
360
if (hdev->notify) {
361
tasklet_disable(&hdev->tx_task);
362
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
363
tasklet_enable(&hdev->tx_task);
364
}
365
}
366
367
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
368
{
369
__u8 status = *((__u8 *) skb->data);
370
__u16 setting;
371
void *sent;
372
373
BT_DBG("%s status 0x%x", hdev->name, status);
374
375
if (status)
376
return;
377
378
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
379
if (!sent)
380
return;
381
382
setting = get_unaligned_le16(sent);
383
384
if (hdev->voice_setting == setting)
385
return;
386
387
hdev->voice_setting = setting;
388
389
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
390
391
if (hdev->notify) {
392
tasklet_disable(&hdev->tx_task);
393
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
394
tasklet_enable(&hdev->tx_task);
395
}
396
}
397
398
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
399
{
400
__u8 status = *((__u8 *) skb->data);
401
402
BT_DBG("%s status 0x%x", hdev->name, status);
403
404
hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
405
}
406
407
static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
408
{
409
struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
410
411
BT_DBG("%s status 0x%x", hdev->name, rp->status);
412
413
if (rp->status)
414
return;
415
416
hdev->ssp_mode = rp->mode;
417
}
418
419
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
420
{
421
__u8 status = *((__u8 *) skb->data);
422
void *sent;
423
424
BT_DBG("%s status 0x%x", hdev->name, status);
425
426
if (status)
427
return;
428
429
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
430
if (!sent)
431
return;
432
433
hdev->ssp_mode = *((__u8 *) sent);
434
}
435
436
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
437
{
438
if (hdev->features[6] & LMP_EXT_INQ)
439
return 2;
440
441
if (hdev->features[3] & LMP_RSSI_INQ)
442
return 1;
443
444
if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
445
hdev->lmp_subver == 0x0757)
446
return 1;
447
448
if (hdev->manufacturer == 15) {
449
if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
450
return 1;
451
if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
452
return 1;
453
if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
454
return 1;
455
}
456
457
if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
458
hdev->lmp_subver == 0x1805)
459
return 1;
460
461
return 0;
462
}
463
464
static void hci_setup_inquiry_mode(struct hci_dev *hdev)
465
{
466
u8 mode;
467
468
mode = hci_get_inquiry_mode(hdev);
469
470
hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
471
}
472
473
static void hci_setup_event_mask(struct hci_dev *hdev)
474
{
475
/* The second byte is 0xff instead of 0x9f (two reserved bits
476
* disabled) since a Broadcom 1.2 dongle doesn't respond to the
477
* command otherwise */
478
u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
479
480
/* CSR 1.1 dongles does not accept any bitfield so don't try to set
481
* any event mask for pre 1.2 devices */
482
if (hdev->lmp_ver <= 1)
483
return;
484
485
events[4] |= 0x01; /* Flow Specification Complete */
486
events[4] |= 0x02; /* Inquiry Result with RSSI */
487
events[4] |= 0x04; /* Read Remote Extended Features Complete */
488
events[5] |= 0x08; /* Synchronous Connection Complete */
489
events[5] |= 0x10; /* Synchronous Connection Changed */
490
491
if (hdev->features[3] & LMP_RSSI_INQ)
492
events[4] |= 0x04; /* Inquiry Result with RSSI */
493
494
if (hdev->features[5] & LMP_SNIFF_SUBR)
495
events[5] |= 0x20; /* Sniff Subrating */
496
497
if (hdev->features[5] & LMP_PAUSE_ENC)
498
events[5] |= 0x80; /* Encryption Key Refresh Complete */
499
500
if (hdev->features[6] & LMP_EXT_INQ)
501
events[5] |= 0x40; /* Extended Inquiry Result */
502
503
if (hdev->features[6] & LMP_NO_FLUSH)
504
events[7] |= 0x01; /* Enhanced Flush Complete */
505
506
if (hdev->features[7] & LMP_LSTO)
507
events[6] |= 0x80; /* Link Supervision Timeout Changed */
508
509
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
510
events[6] |= 0x01; /* IO Capability Request */
511
events[6] |= 0x02; /* IO Capability Response */
512
events[6] |= 0x04; /* User Confirmation Request */
513
events[6] |= 0x08; /* User Passkey Request */
514
events[6] |= 0x10; /* Remote OOB Data Request */
515
events[6] |= 0x20; /* Simple Pairing Complete */
516
events[7] |= 0x04; /* User Passkey Notification */
517
events[7] |= 0x08; /* Keypress Notification */
518
events[7] |= 0x10; /* Remote Host Supported
519
* Features Notification */
520
}
521
522
if (hdev->features[4] & LMP_LE)
523
events[7] |= 0x20; /* LE Meta-Event */
524
525
hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
526
}
527
528
static void hci_setup(struct hci_dev *hdev)
529
{
530
hci_setup_event_mask(hdev);
531
532
if (hdev->lmp_ver > 1)
533
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534
535
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
536
u8 mode = 0x01;
537
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
538
}
539
540
if (hdev->features[3] & LMP_RSSI_INQ)
541
hci_setup_inquiry_mode(hdev);
542
543
if (hdev->features[7] & LMP_INQ_TX_PWR)
544
hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
545
}
546
547
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
548
{
549
struct hci_rp_read_local_version *rp = (void *) skb->data;
550
551
BT_DBG("%s status 0x%x", hdev->name, rp->status);
552
553
if (rp->status)
554
return;
555
556
hdev->hci_ver = rp->hci_ver;
557
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
558
hdev->lmp_ver = rp->lmp_ver;
559
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
560
hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
561
562
BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
563
hdev->manufacturer,
564
hdev->hci_ver, hdev->hci_rev);
565
566
if (test_bit(HCI_INIT, &hdev->flags))
567
hci_setup(hdev);
568
}
569
570
static void hci_setup_link_policy(struct hci_dev *hdev)
571
{
572
u16 link_policy = 0;
573
574
if (hdev->features[0] & LMP_RSWITCH)
575
link_policy |= HCI_LP_RSWITCH;
576
if (hdev->features[0] & LMP_HOLD)
577
link_policy |= HCI_LP_HOLD;
578
if (hdev->features[0] & LMP_SNIFF)
579
link_policy |= HCI_LP_SNIFF;
580
if (hdev->features[1] & LMP_PARK)
581
link_policy |= HCI_LP_PARK;
582
583
link_policy = cpu_to_le16(link_policy);
584
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
585
sizeof(link_policy), &link_policy);
586
}
587
588
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
589
{
590
struct hci_rp_read_local_commands *rp = (void *) skb->data;
591
592
BT_DBG("%s status 0x%x", hdev->name, rp->status);
593
594
if (rp->status)
595
goto done;
596
597
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
598
599
if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
600
hci_setup_link_policy(hdev);
601
602
done:
603
hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
604
}
605
606
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
607
{
608
struct hci_rp_read_local_features *rp = (void *) skb->data;
609
610
BT_DBG("%s status 0x%x", hdev->name, rp->status);
611
612
if (rp->status)
613
return;
614
615
memcpy(hdev->features, rp->features, 8);
616
617
/* Adjust default settings according to features
618
* supported by device. */
619
620
if (hdev->features[0] & LMP_3SLOT)
621
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
622
623
if (hdev->features[0] & LMP_5SLOT)
624
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
625
626
if (hdev->features[1] & LMP_HV2) {
627
hdev->pkt_type |= (HCI_HV2);
628
hdev->esco_type |= (ESCO_HV2);
629
}
630
631
if (hdev->features[1] & LMP_HV3) {
632
hdev->pkt_type |= (HCI_HV3);
633
hdev->esco_type |= (ESCO_HV3);
634
}
635
636
if (hdev->features[3] & LMP_ESCO)
637
hdev->esco_type |= (ESCO_EV3);
638
639
if (hdev->features[4] & LMP_EV4)
640
hdev->esco_type |= (ESCO_EV4);
641
642
if (hdev->features[4] & LMP_EV5)
643
hdev->esco_type |= (ESCO_EV5);
644
645
if (hdev->features[5] & LMP_EDR_ESCO_2M)
646
hdev->esco_type |= (ESCO_2EV3);
647
648
if (hdev->features[5] & LMP_EDR_ESCO_3M)
649
hdev->esco_type |= (ESCO_3EV3);
650
651
if (hdev->features[5] & LMP_EDR_3S_ESCO)
652
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
653
654
BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
655
hdev->features[0], hdev->features[1],
656
hdev->features[2], hdev->features[3],
657
hdev->features[4], hdev->features[5],
658
hdev->features[6], hdev->features[7]);
659
}
660
661
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
662
{
663
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
664
665
BT_DBG("%s status 0x%x", hdev->name, rp->status);
666
667
if (rp->status)
668
return;
669
670
hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
671
hdev->sco_mtu = rp->sco_mtu;
672
hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
673
hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
674
675
if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
676
hdev->sco_mtu = 64;
677
hdev->sco_pkts = 8;
678
}
679
680
hdev->acl_cnt = hdev->acl_pkts;
681
hdev->sco_cnt = hdev->sco_pkts;
682
683
BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
684
hdev->acl_mtu, hdev->acl_pkts,
685
hdev->sco_mtu, hdev->sco_pkts);
686
}
687
688
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
689
{
690
struct hci_rp_read_bd_addr *rp = (void *) skb->data;
691
692
BT_DBG("%s status 0x%x", hdev->name, rp->status);
693
694
if (!rp->status)
695
bacpy(&hdev->bdaddr, &rp->bdaddr);
696
697
hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
698
}
699
700
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
701
{
702
__u8 status = *((__u8 *) skb->data);
703
704
BT_DBG("%s status 0x%x", hdev->name, status);
705
706
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
707
}
708
709
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
710
struct sk_buff *skb)
711
{
712
__u8 status = *((__u8 *) skb->data);
713
714
BT_DBG("%s status 0x%x", hdev->name, status);
715
716
hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
717
}
718
719
static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
720
{
721
__u8 status = *((__u8 *) skb->data);
722
723
BT_DBG("%s status 0x%x", hdev->name, status);
724
725
hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
726
}
727
728
static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
729
struct sk_buff *skb)
730
{
731
__u8 status = *((__u8 *) skb->data);
732
733
BT_DBG("%s status 0x%x", hdev->name, status);
734
735
hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
736
}
737
738
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
739
struct sk_buff *skb)
740
{
741
__u8 status = *((__u8 *) skb->data);
742
743
BT_DBG("%s status 0x%x", hdev->name, status);
744
745
hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
746
}
747
748
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
749
{
750
__u8 status = *((__u8 *) skb->data);
751
752
BT_DBG("%s status 0x%x", hdev->name, status);
753
754
hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
755
}
756
757
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
758
{
759
struct hci_rp_pin_code_reply *rp = (void *) skb->data;
760
struct hci_cp_pin_code_reply *cp;
761
struct hci_conn *conn;
762
763
BT_DBG("%s status 0x%x", hdev->name, rp->status);
764
765
if (test_bit(HCI_MGMT, &hdev->flags))
766
mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
767
768
if (rp->status != 0)
769
return;
770
771
cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
772
if (!cp)
773
return;
774
775
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
776
if (conn)
777
conn->pin_length = cp->pin_len;
778
}
779
780
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
781
{
782
struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
783
784
BT_DBG("%s status 0x%x", hdev->name, rp->status);
785
786
if (test_bit(HCI_MGMT, &hdev->flags))
787
mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
788
rp->status);
789
}
790
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
791
struct sk_buff *skb)
792
{
793
struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
794
795
BT_DBG("%s status 0x%x", hdev->name, rp->status);
796
797
if (rp->status)
798
return;
799
800
hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
801
hdev->le_pkts = rp->le_max_pkt;
802
803
hdev->le_cnt = hdev->le_pkts;
804
805
BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
806
807
hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
808
}
809
810
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
811
{
812
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
813
814
BT_DBG("%s status 0x%x", hdev->name, rp->status);
815
816
if (test_bit(HCI_MGMT, &hdev->flags))
817
mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
818
rp->status);
819
}
820
821
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
822
struct sk_buff *skb)
823
{
824
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
825
826
BT_DBG("%s status 0x%x", hdev->name, rp->status);
827
828
if (test_bit(HCI_MGMT, &hdev->flags))
829
mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
830
rp->status);
831
}
832
833
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
834
struct sk_buff *skb)
835
{
836
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
837
838
BT_DBG("%s status 0x%x", hdev->name, rp->status);
839
840
mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
841
rp->randomizer, rp->status);
842
}
843
844
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
845
{
846
BT_DBG("%s status 0x%x", hdev->name, status);
847
848
if (status) {
849
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
850
hci_conn_check_pending(hdev);
851
return;
852
}
853
854
if (test_bit(HCI_MGMT, &hdev->flags) &&
855
!test_and_set_bit(HCI_INQUIRY,
856
&hdev->flags))
857
mgmt_discovering(hdev->id, 1);
858
}
859
860
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
861
{
862
struct hci_cp_create_conn *cp;
863
struct hci_conn *conn;
864
865
BT_DBG("%s status 0x%x", hdev->name, status);
866
867
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
868
if (!cp)
869
return;
870
871
hci_dev_lock(hdev);
872
873
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
874
875
BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
876
877
if (status) {
878
if (conn && conn->state == BT_CONNECT) {
879
if (status != 0x0c || conn->attempt > 2) {
880
conn->state = BT_CLOSED;
881
hci_proto_connect_cfm(conn, status);
882
hci_conn_del(conn);
883
} else
884
conn->state = BT_CONNECT2;
885
}
886
} else {
887
if (!conn) {
888
conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
889
if (conn) {
890
conn->out = 1;
891
conn->link_mode |= HCI_LM_MASTER;
892
} else
893
BT_ERR("No memory for new connection");
894
}
895
}
896
897
hci_dev_unlock(hdev);
898
}
899
900
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
901
{
902
struct hci_cp_add_sco *cp;
903
struct hci_conn *acl, *sco;
904
__u16 handle;
905
906
BT_DBG("%s status 0x%x", hdev->name, status);
907
908
if (!status)
909
return;
910
911
cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
912
if (!cp)
913
return;
914
915
handle = __le16_to_cpu(cp->handle);
916
917
BT_DBG("%s handle %d", hdev->name, handle);
918
919
hci_dev_lock(hdev);
920
921
acl = hci_conn_hash_lookup_handle(hdev, handle);
922
if (acl) {
923
sco = acl->link;
924
if (sco) {
925
sco->state = BT_CLOSED;
926
927
hci_proto_connect_cfm(sco, status);
928
hci_conn_del(sco);
929
}
930
}
931
932
hci_dev_unlock(hdev);
933
}
934
935
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
936
{
937
struct hci_cp_auth_requested *cp;
938
struct hci_conn *conn;
939
940
BT_DBG("%s status 0x%x", hdev->name, status);
941
942
if (!status)
943
return;
944
945
cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
946
if (!cp)
947
return;
948
949
hci_dev_lock(hdev);
950
951
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
952
if (conn) {
953
if (conn->state == BT_CONFIG) {
954
hci_proto_connect_cfm(conn, status);
955
hci_conn_put(conn);
956
}
957
}
958
959
hci_dev_unlock(hdev);
960
}
961
962
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
963
{
964
struct hci_cp_set_conn_encrypt *cp;
965
struct hci_conn *conn;
966
967
BT_DBG("%s status 0x%x", hdev->name, status);
968
969
if (!status)
970
return;
971
972
cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
973
if (!cp)
974
return;
975
976
hci_dev_lock(hdev);
977
978
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
979
if (conn) {
980
if (conn->state == BT_CONFIG) {
981
hci_proto_connect_cfm(conn, status);
982
hci_conn_put(conn);
983
}
984
}
985
986
hci_dev_unlock(hdev);
987
}
988
989
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
990
struct hci_conn *conn)
991
{
992
if (conn->state != BT_CONFIG || !conn->out)
993
return 0;
994
995
if (conn->pending_sec_level == BT_SECURITY_SDP)
996
return 0;
997
998
/* Only request authentication for SSP connections or non-SSP
999
* devices with sec_level HIGH */
1000
if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1001
conn->pending_sec_level != BT_SECURITY_HIGH)
1002
return 0;
1003
1004
return 1;
1005
}
1006
1007
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1008
{
1009
struct hci_cp_remote_name_req *cp;
1010
struct hci_conn *conn;
1011
1012
BT_DBG("%s status 0x%x", hdev->name, status);
1013
1014
/* If successful wait for the name req complete event before
1015
* checking for the need to do authentication */
1016
if (!status)
1017
return;
1018
1019
cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1020
if (!cp)
1021
return;
1022
1023
hci_dev_lock(hdev);
1024
1025
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1026
if (!conn)
1027
goto unlock;
1028
1029
if (!hci_outgoing_auth_needed(hdev, conn))
1030
goto unlock;
1031
1032
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1033
struct hci_cp_auth_requested cp;
1034
cp.handle = __cpu_to_le16(conn->handle);
1035
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1036
}
1037
1038
unlock:
1039
hci_dev_unlock(hdev);
1040
}
1041
1042
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1043
{
1044
struct hci_cp_read_remote_features *cp;
1045
struct hci_conn *conn;
1046
1047
BT_DBG("%s status 0x%x", hdev->name, status);
1048
1049
if (!status)
1050
return;
1051
1052
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1053
if (!cp)
1054
return;
1055
1056
hci_dev_lock(hdev);
1057
1058
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1059
if (conn) {
1060
if (conn->state == BT_CONFIG) {
1061
hci_proto_connect_cfm(conn, status);
1062
hci_conn_put(conn);
1063
}
1064
}
1065
1066
hci_dev_unlock(hdev);
1067
}
1068
1069
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1070
{
1071
struct hci_cp_read_remote_ext_features *cp;
1072
struct hci_conn *conn;
1073
1074
BT_DBG("%s status 0x%x", hdev->name, status);
1075
1076
if (!status)
1077
return;
1078
1079
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1080
if (!cp)
1081
return;
1082
1083
hci_dev_lock(hdev);
1084
1085
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1086
if (conn) {
1087
if (conn->state == BT_CONFIG) {
1088
hci_proto_connect_cfm(conn, status);
1089
hci_conn_put(conn);
1090
}
1091
}
1092
1093
hci_dev_unlock(hdev);
1094
}
1095
1096
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1097
{
1098
struct hci_cp_setup_sync_conn *cp;
1099
struct hci_conn *acl, *sco;
1100
__u16 handle;
1101
1102
BT_DBG("%s status 0x%x", hdev->name, status);
1103
1104
if (!status)
1105
return;
1106
1107
cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1108
if (!cp)
1109
return;
1110
1111
handle = __le16_to_cpu(cp->handle);
1112
1113
BT_DBG("%s handle %d", hdev->name, handle);
1114
1115
hci_dev_lock(hdev);
1116
1117
acl = hci_conn_hash_lookup_handle(hdev, handle);
1118
if (acl) {
1119
sco = acl->link;
1120
if (sco) {
1121
sco->state = BT_CLOSED;
1122
1123
hci_proto_connect_cfm(sco, status);
1124
hci_conn_del(sco);
1125
}
1126
}
1127
1128
hci_dev_unlock(hdev);
1129
}
1130
1131
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1132
{
1133
struct hci_cp_sniff_mode *cp;
1134
struct hci_conn *conn;
1135
1136
BT_DBG("%s status 0x%x", hdev->name, status);
1137
1138
if (!status)
1139
return;
1140
1141
cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1142
if (!cp)
1143
return;
1144
1145
hci_dev_lock(hdev);
1146
1147
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1148
if (conn) {
1149
clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1150
1151
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1152
hci_sco_setup(conn, status);
1153
}
1154
1155
hci_dev_unlock(hdev);
1156
}
1157
1158
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1159
{
1160
struct hci_cp_exit_sniff_mode *cp;
1161
struct hci_conn *conn;
1162
1163
BT_DBG("%s status 0x%x", hdev->name, status);
1164
1165
if (!status)
1166
return;
1167
1168
cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1169
if (!cp)
1170
return;
1171
1172
hci_dev_lock(hdev);
1173
1174
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1175
if (conn) {
1176
clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1177
1178
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1179
hci_sco_setup(conn, status);
1180
}
1181
1182
hci_dev_unlock(hdev);
1183
}
1184
1185
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1186
{
1187
struct hci_cp_le_create_conn *cp;
1188
struct hci_conn *conn;
1189
1190
BT_DBG("%s status 0x%x", hdev->name, status);
1191
1192
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1193
if (!cp)
1194
return;
1195
1196
hci_dev_lock(hdev);
1197
1198
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1199
1200
BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1201
conn);
1202
1203
if (status) {
1204
if (conn && conn->state == BT_CONNECT) {
1205
conn->state = BT_CLOSED;
1206
hci_proto_connect_cfm(conn, status);
1207
hci_conn_del(conn);
1208
}
1209
} else {
1210
if (!conn) {
1211
conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1212
if (conn)
1213
conn->out = 1;
1214
else
1215
BT_ERR("No memory for new connection");
1216
}
1217
}
1218
1219
hci_dev_unlock(hdev);
1220
}
1221
1222
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1223
{
1224
__u8 status = *((__u8 *) skb->data);
1225
1226
BT_DBG("%s status %d", hdev->name, status);
1227
1228
if (test_bit(HCI_MGMT, &hdev->flags) &&
1229
test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1230
mgmt_discovering(hdev->id, 0);
1231
1232
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1233
1234
hci_conn_check_pending(hdev);
1235
}
1236
1237
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1238
{
1239
struct inquiry_data data;
1240
struct inquiry_info *info = (void *) (skb->data + 1);
1241
int num_rsp = *((__u8 *) skb->data);
1242
1243
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1244
1245
if (!num_rsp)
1246
return;
1247
1248
hci_dev_lock(hdev);
1249
1250
if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
1251
1252
if (test_bit(HCI_MGMT, &hdev->flags))
1253
mgmt_discovering(hdev->id, 1);
1254
}
1255
1256
for (; num_rsp; num_rsp--, info++) {
1257
bacpy(&data.bdaddr, &info->bdaddr);
1258
data.pscan_rep_mode = info->pscan_rep_mode;
1259
data.pscan_period_mode = info->pscan_period_mode;
1260
data.pscan_mode = info->pscan_mode;
1261
memcpy(data.dev_class, info->dev_class, 3);
1262
data.clock_offset = info->clock_offset;
1263
data.rssi = 0x00;
1264
data.ssp_mode = 0x00;
1265
hci_inquiry_cache_update(hdev, &data);
1266
mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1267
NULL);
1268
}
1269
1270
hci_dev_unlock(hdev);
1271
}
1272
1273
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1274
{
1275
struct hci_ev_conn_complete *ev = (void *) skb->data;
1276
struct hci_conn *conn;
1277
1278
BT_DBG("%s", hdev->name);
1279
1280
hci_dev_lock(hdev);
1281
1282
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1283
if (!conn) {
1284
if (ev->link_type != SCO_LINK)
1285
goto unlock;
1286
1287
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1288
if (!conn)
1289
goto unlock;
1290
1291
conn->type = SCO_LINK;
1292
}
1293
1294
if (!ev->status) {
1295
conn->handle = __le16_to_cpu(ev->handle);
1296
1297
if (conn->type == ACL_LINK) {
1298
conn->state = BT_CONFIG;
1299
hci_conn_hold(conn);
1300
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1301
mgmt_connected(hdev->id, &ev->bdaddr);
1302
} else
1303
conn->state = BT_CONNECTED;
1304
1305
hci_conn_hold_device(conn);
1306
hci_conn_add_sysfs(conn);
1307
1308
if (test_bit(HCI_AUTH, &hdev->flags))
1309
conn->link_mode |= HCI_LM_AUTH;
1310
1311
if (test_bit(HCI_ENCRYPT, &hdev->flags))
1312
conn->link_mode |= HCI_LM_ENCRYPT;
1313
1314
/* Get remote features */
1315
if (conn->type == ACL_LINK) {
1316
struct hci_cp_read_remote_features cp;
1317
cp.handle = ev->handle;
1318
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1319
sizeof(cp), &cp);
1320
}
1321
1322
/* Set packet type for incoming connection */
1323
if (!conn->out && hdev->hci_ver < 3) {
1324
struct hci_cp_change_conn_ptype cp;
1325
cp.handle = ev->handle;
1326
cp.pkt_type = cpu_to_le16(conn->pkt_type);
1327
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1328
sizeof(cp), &cp);
1329
}
1330
} else {
1331
conn->state = BT_CLOSED;
1332
if (conn->type == ACL_LINK)
1333
mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1334
}
1335
1336
if (conn->type == ACL_LINK)
1337
hci_sco_setup(conn, ev->status);
1338
1339
if (ev->status) {
1340
hci_proto_connect_cfm(conn, ev->status);
1341
hci_conn_del(conn);
1342
} else if (ev->link_type != ACL_LINK)
1343
hci_proto_connect_cfm(conn, ev->status);
1344
1345
unlock:
1346
hci_dev_unlock(hdev);
1347
1348
hci_conn_check_pending(hdev);
1349
}
1350
1351
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1352
{
1353
struct hci_ev_conn_request *ev = (void *) skb->data;
1354
int mask = hdev->link_mode;
1355
1356
BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1357
batostr(&ev->bdaddr), ev->link_type);
1358
1359
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1360
1361
if ((mask & HCI_LM_ACCEPT) &&
1362
!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1363
/* Connection accepted */
1364
struct inquiry_entry *ie;
1365
struct hci_conn *conn;
1366
1367
hci_dev_lock(hdev);
1368
1369
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1370
if (ie)
1371
memcpy(ie->data.dev_class, ev->dev_class, 3);
1372
1373
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1374
if (!conn) {
1375
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1376
if (!conn) {
1377
BT_ERR("No memory for new connection");
1378
hci_dev_unlock(hdev);
1379
return;
1380
}
1381
}
1382
1383
memcpy(conn->dev_class, ev->dev_class, 3);
1384
conn->state = BT_CONNECT;
1385
1386
hci_dev_unlock(hdev);
1387
1388
if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1389
struct hci_cp_accept_conn_req cp;
1390
1391
bacpy(&cp.bdaddr, &ev->bdaddr);
1392
1393
if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1394
cp.role = 0x00; /* Become master */
1395
else
1396
cp.role = 0x01; /* Remain slave */
1397
1398
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1399
sizeof(cp), &cp);
1400
} else {
1401
struct hci_cp_accept_sync_conn_req cp;
1402
1403
bacpy(&cp.bdaddr, &ev->bdaddr);
1404
cp.pkt_type = cpu_to_le16(conn->pkt_type);
1405
1406
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1407
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1408
cp.max_latency = cpu_to_le16(0xffff);
1409
cp.content_format = cpu_to_le16(hdev->voice_setting);
1410
cp.retrans_effort = 0xff;
1411
1412
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1413
sizeof(cp), &cp);
1414
}
1415
} else {
1416
/* Connection rejected */
1417
struct hci_cp_reject_conn_req cp;
1418
1419
bacpy(&cp.bdaddr, &ev->bdaddr);
1420
cp.reason = 0x0f;
1421
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1422
}
1423
}
1424
1425
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1426
{
1427
struct hci_ev_disconn_complete *ev = (void *) skb->data;
1428
struct hci_conn *conn;
1429
1430
BT_DBG("%s status %d", hdev->name, ev->status);
1431
1432
if (ev->status) {
1433
mgmt_disconnect_failed(hdev->id);
1434
return;
1435
}
1436
1437
hci_dev_lock(hdev);
1438
1439
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1440
if (!conn)
1441
goto unlock;
1442
1443
conn->state = BT_CLOSED;
1444
1445
if (conn->type == ACL_LINK || conn->type == LE_LINK)
1446
mgmt_disconnected(hdev->id, &conn->dst);
1447
1448
hci_proto_disconn_cfm(conn, ev->reason);
1449
hci_conn_del(conn);
1450
1451
unlock:
1452
hci_dev_unlock(hdev);
1453
}
1454
1455
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1456
{
1457
struct hci_ev_auth_complete *ev = (void *) skb->data;
1458
struct hci_conn *conn;
1459
1460
BT_DBG("%s status %d", hdev->name, ev->status);
1461
1462
hci_dev_lock(hdev);
1463
1464
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1465
if (conn) {
1466
if (!ev->status) {
1467
conn->link_mode |= HCI_LM_AUTH;
1468
conn->sec_level = conn->pending_sec_level;
1469
} else {
1470
mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1471
}
1472
1473
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1474
1475
if (conn->state == BT_CONFIG) {
1476
if (!ev->status && hdev->ssp_mode > 0 &&
1477
conn->ssp_mode > 0) {
1478
struct hci_cp_set_conn_encrypt cp;
1479
cp.handle = ev->handle;
1480
cp.encrypt = 0x01;
1481
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1482
sizeof(cp), &cp);
1483
} else {
1484
conn->state = BT_CONNECTED;
1485
hci_proto_connect_cfm(conn, ev->status);
1486
hci_conn_put(conn);
1487
}
1488
} else {
1489
hci_auth_cfm(conn, ev->status);
1490
1491
hci_conn_hold(conn);
1492
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1493
hci_conn_put(conn);
1494
}
1495
1496
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1497
if (!ev->status) {
1498
struct hci_cp_set_conn_encrypt cp;
1499
cp.handle = ev->handle;
1500
cp.encrypt = 0x01;
1501
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1502
sizeof(cp), &cp);
1503
} else {
1504
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1505
hci_encrypt_cfm(conn, ev->status, 0x00);
1506
}
1507
}
1508
}
1509
1510
hci_dev_unlock(hdev);
1511
}
1512
1513
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1514
{
1515
struct hci_ev_remote_name *ev = (void *) skb->data;
1516
struct hci_conn *conn;
1517
1518
BT_DBG("%s", hdev->name);
1519
1520
hci_conn_check_pending(hdev);
1521
1522
hci_dev_lock(hdev);
1523
1524
if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1525
mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1526
1527
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1528
if (!conn)
1529
goto unlock;
1530
1531
if (!hci_outgoing_auth_needed(hdev, conn))
1532
goto unlock;
1533
1534
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1535
struct hci_cp_auth_requested cp;
1536
cp.handle = __cpu_to_le16(conn->handle);
1537
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1538
}
1539
1540
unlock:
1541
hci_dev_unlock(hdev);
1542
}
1543
1544
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1545
{
1546
struct hci_ev_encrypt_change *ev = (void *) skb->data;
1547
struct hci_conn *conn;
1548
1549
BT_DBG("%s status %d", hdev->name, ev->status);
1550
1551
hci_dev_lock(hdev);
1552
1553
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1554
if (conn) {
1555
if (!ev->status) {
1556
if (ev->encrypt) {
1557
/* Encryption implies authentication */
1558
conn->link_mode |= HCI_LM_AUTH;
1559
conn->link_mode |= HCI_LM_ENCRYPT;
1560
} else
1561
conn->link_mode &= ~HCI_LM_ENCRYPT;
1562
}
1563
1564
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1565
1566
if (conn->state == BT_CONFIG) {
1567
if (!ev->status)
1568
conn->state = BT_CONNECTED;
1569
1570
hci_proto_connect_cfm(conn, ev->status);
1571
hci_conn_put(conn);
1572
} else
1573
hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1574
}
1575
1576
hci_dev_unlock(hdev);
1577
}
1578
1579
static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1580
{
1581
struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1582
struct hci_conn *conn;
1583
1584
BT_DBG("%s status %d", hdev->name, ev->status);
1585
1586
hci_dev_lock(hdev);
1587
1588
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1589
if (conn) {
1590
if (!ev->status)
1591
conn->link_mode |= HCI_LM_SECURE;
1592
1593
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1594
1595
hci_key_change_cfm(conn, ev->status);
1596
}
1597
1598
hci_dev_unlock(hdev);
1599
}
1600
1601
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1602
{
1603
struct hci_ev_remote_features *ev = (void *) skb->data;
1604
struct hci_conn *conn;
1605
1606
BT_DBG("%s status %d", hdev->name, ev->status);
1607
1608
hci_dev_lock(hdev);
1609
1610
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1611
if (!conn)
1612
goto unlock;
1613
1614
if (!ev->status)
1615
memcpy(conn->features, ev->features, 8);
1616
1617
if (conn->state != BT_CONFIG)
1618
goto unlock;
1619
1620
if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1621
struct hci_cp_read_remote_ext_features cp;
1622
cp.handle = ev->handle;
1623
cp.page = 0x01;
1624
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1625
sizeof(cp), &cp);
1626
goto unlock;
1627
}
1628
1629
if (!ev->status) {
1630
struct hci_cp_remote_name_req cp;
1631
memset(&cp, 0, sizeof(cp));
1632
bacpy(&cp.bdaddr, &conn->dst);
1633
cp.pscan_rep_mode = 0x02;
1634
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1635
}
1636
1637
if (!hci_outgoing_auth_needed(hdev, conn)) {
1638
conn->state = BT_CONNECTED;
1639
hci_proto_connect_cfm(conn, ev->status);
1640
hci_conn_put(conn);
1641
}
1642
1643
unlock:
1644
hci_dev_unlock(hdev);
1645
}
1646
1647
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1648
{
1649
BT_DBG("%s", hdev->name);
1650
}
1651
1652
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1653
{
1654
BT_DBG("%s", hdev->name);
1655
}
1656
1657
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1658
{
1659
struct hci_ev_cmd_complete *ev = (void *) skb->data;
1660
__u16 opcode;
1661
1662
skb_pull(skb, sizeof(*ev));
1663
1664
opcode = __le16_to_cpu(ev->opcode);
1665
1666
switch (opcode) {
1667
case HCI_OP_INQUIRY_CANCEL:
1668
hci_cc_inquiry_cancel(hdev, skb);
1669
break;
1670
1671
case HCI_OP_EXIT_PERIODIC_INQ:
1672
hci_cc_exit_periodic_inq(hdev, skb);
1673
break;
1674
1675
case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1676
hci_cc_remote_name_req_cancel(hdev, skb);
1677
break;
1678
1679
case HCI_OP_ROLE_DISCOVERY:
1680
hci_cc_role_discovery(hdev, skb);
1681
break;
1682
1683
case HCI_OP_READ_LINK_POLICY:
1684
hci_cc_read_link_policy(hdev, skb);
1685
break;
1686
1687
case HCI_OP_WRITE_LINK_POLICY:
1688
hci_cc_write_link_policy(hdev, skb);
1689
break;
1690
1691
case HCI_OP_READ_DEF_LINK_POLICY:
1692
hci_cc_read_def_link_policy(hdev, skb);
1693
break;
1694
1695
case HCI_OP_WRITE_DEF_LINK_POLICY:
1696
hci_cc_write_def_link_policy(hdev, skb);
1697
break;
1698
1699
case HCI_OP_RESET:
1700
hci_cc_reset(hdev, skb);
1701
break;
1702
1703
case HCI_OP_WRITE_LOCAL_NAME:
1704
hci_cc_write_local_name(hdev, skb);
1705
break;
1706
1707
case HCI_OP_READ_LOCAL_NAME:
1708
hci_cc_read_local_name(hdev, skb);
1709
break;
1710
1711
case HCI_OP_WRITE_AUTH_ENABLE:
1712
hci_cc_write_auth_enable(hdev, skb);
1713
break;
1714
1715
case HCI_OP_WRITE_ENCRYPT_MODE:
1716
hci_cc_write_encrypt_mode(hdev, skb);
1717
break;
1718
1719
case HCI_OP_WRITE_SCAN_ENABLE:
1720
hci_cc_write_scan_enable(hdev, skb);
1721
break;
1722
1723
case HCI_OP_READ_CLASS_OF_DEV:
1724
hci_cc_read_class_of_dev(hdev, skb);
1725
break;
1726
1727
case HCI_OP_WRITE_CLASS_OF_DEV:
1728
hci_cc_write_class_of_dev(hdev, skb);
1729
break;
1730
1731
case HCI_OP_READ_VOICE_SETTING:
1732
hci_cc_read_voice_setting(hdev, skb);
1733
break;
1734
1735
case HCI_OP_WRITE_VOICE_SETTING:
1736
hci_cc_write_voice_setting(hdev, skb);
1737
break;
1738
1739
case HCI_OP_HOST_BUFFER_SIZE:
1740
hci_cc_host_buffer_size(hdev, skb);
1741
break;
1742
1743
case HCI_OP_READ_SSP_MODE:
1744
hci_cc_read_ssp_mode(hdev, skb);
1745
break;
1746
1747
case HCI_OP_WRITE_SSP_MODE:
1748
hci_cc_write_ssp_mode(hdev, skb);
1749
break;
1750
1751
case HCI_OP_READ_LOCAL_VERSION:
1752
hci_cc_read_local_version(hdev, skb);
1753
break;
1754
1755
case HCI_OP_READ_LOCAL_COMMANDS:
1756
hci_cc_read_local_commands(hdev, skb);
1757
break;
1758
1759
case HCI_OP_READ_LOCAL_FEATURES:
1760
hci_cc_read_local_features(hdev, skb);
1761
break;
1762
1763
case HCI_OP_READ_BUFFER_SIZE:
1764
hci_cc_read_buffer_size(hdev, skb);
1765
break;
1766
1767
case HCI_OP_READ_BD_ADDR:
1768
hci_cc_read_bd_addr(hdev, skb);
1769
break;
1770
1771
case HCI_OP_WRITE_CA_TIMEOUT:
1772
hci_cc_write_ca_timeout(hdev, skb);
1773
break;
1774
1775
case HCI_OP_DELETE_STORED_LINK_KEY:
1776
hci_cc_delete_stored_link_key(hdev, skb);
1777
break;
1778
1779
case HCI_OP_SET_EVENT_MASK:
1780
hci_cc_set_event_mask(hdev, skb);
1781
break;
1782
1783
case HCI_OP_WRITE_INQUIRY_MODE:
1784
hci_cc_write_inquiry_mode(hdev, skb);
1785
break;
1786
1787
case HCI_OP_READ_INQ_RSP_TX_POWER:
1788
hci_cc_read_inq_rsp_tx_power(hdev, skb);
1789
break;
1790
1791
case HCI_OP_SET_EVENT_FLT:
1792
hci_cc_set_event_flt(hdev, skb);
1793
break;
1794
1795
case HCI_OP_PIN_CODE_REPLY:
1796
hci_cc_pin_code_reply(hdev, skb);
1797
break;
1798
1799
case HCI_OP_PIN_CODE_NEG_REPLY:
1800
hci_cc_pin_code_neg_reply(hdev, skb);
1801
break;
1802
1803
case HCI_OP_READ_LOCAL_OOB_DATA:
1804
hci_cc_read_local_oob_data_reply(hdev, skb);
1805
break;
1806
1807
case HCI_OP_LE_READ_BUFFER_SIZE:
1808
hci_cc_le_read_buffer_size(hdev, skb);
1809
break;
1810
1811
case HCI_OP_USER_CONFIRM_REPLY:
1812
hci_cc_user_confirm_reply(hdev, skb);
1813
break;
1814
1815
case HCI_OP_USER_CONFIRM_NEG_REPLY:
1816
hci_cc_user_confirm_neg_reply(hdev, skb);
1817
break;
1818
1819
default:
1820
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1821
break;
1822
}
1823
1824
if (ev->opcode != HCI_OP_NOP)
1825
del_timer(&hdev->cmd_timer);
1826
1827
if (ev->ncmd) {
1828
atomic_set(&hdev->cmd_cnt, 1);
1829
if (!skb_queue_empty(&hdev->cmd_q))
1830
tasklet_schedule(&hdev->cmd_task);
1831
}
1832
}
1833
1834
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1835
{
1836
struct hci_ev_cmd_status *ev = (void *) skb->data;
1837
__u16 opcode;
1838
1839
skb_pull(skb, sizeof(*ev));
1840
1841
opcode = __le16_to_cpu(ev->opcode);
1842
1843
switch (opcode) {
1844
case HCI_OP_INQUIRY:
1845
hci_cs_inquiry(hdev, ev->status);
1846
break;
1847
1848
case HCI_OP_CREATE_CONN:
1849
hci_cs_create_conn(hdev, ev->status);
1850
break;
1851
1852
case HCI_OP_ADD_SCO:
1853
hci_cs_add_sco(hdev, ev->status);
1854
break;
1855
1856
case HCI_OP_AUTH_REQUESTED:
1857
hci_cs_auth_requested(hdev, ev->status);
1858
break;
1859
1860
case HCI_OP_SET_CONN_ENCRYPT:
1861
hci_cs_set_conn_encrypt(hdev, ev->status);
1862
break;
1863
1864
case HCI_OP_REMOTE_NAME_REQ:
1865
hci_cs_remote_name_req(hdev, ev->status);
1866
break;
1867
1868
case HCI_OP_READ_REMOTE_FEATURES:
1869
hci_cs_read_remote_features(hdev, ev->status);
1870
break;
1871
1872
case HCI_OP_READ_REMOTE_EXT_FEATURES:
1873
hci_cs_read_remote_ext_features(hdev, ev->status);
1874
break;
1875
1876
case HCI_OP_SETUP_SYNC_CONN:
1877
hci_cs_setup_sync_conn(hdev, ev->status);
1878
break;
1879
1880
case HCI_OP_SNIFF_MODE:
1881
hci_cs_sniff_mode(hdev, ev->status);
1882
break;
1883
1884
case HCI_OP_EXIT_SNIFF_MODE:
1885
hci_cs_exit_sniff_mode(hdev, ev->status);
1886
break;
1887
1888
case HCI_OP_DISCONNECT:
1889
if (ev->status != 0)
1890
mgmt_disconnect_failed(hdev->id);
1891
break;
1892
1893
case HCI_OP_LE_CREATE_CONN:
1894
hci_cs_le_create_conn(hdev, ev->status);
1895
break;
1896
1897
default:
1898
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1899
break;
1900
}
1901
1902
if (ev->opcode != HCI_OP_NOP)
1903
del_timer(&hdev->cmd_timer);
1904
1905
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
1906
atomic_set(&hdev->cmd_cnt, 1);
1907
if (!skb_queue_empty(&hdev->cmd_q))
1908
tasklet_schedule(&hdev->cmd_task);
1909
}
1910
}
1911
1912
static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1913
{
1914
struct hci_ev_role_change *ev = (void *) skb->data;
1915
struct hci_conn *conn;
1916
1917
BT_DBG("%s status %d", hdev->name, ev->status);
1918
1919
hci_dev_lock(hdev);
1920
1921
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1922
if (conn) {
1923
if (!ev->status) {
1924
if (ev->role)
1925
conn->link_mode &= ~HCI_LM_MASTER;
1926
else
1927
conn->link_mode |= HCI_LM_MASTER;
1928
}
1929
1930
clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1931
1932
hci_role_switch_cfm(conn, ev->status, ev->role);
1933
}
1934
1935
hci_dev_unlock(hdev);
1936
}
1937
1938
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
1939
{
1940
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
1941
__le16 *ptr;
1942
int i;
1943
1944
skb_pull(skb, sizeof(*ev));
1945
1946
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
1947
1948
if (skb->len < ev->num_hndl * 4) {
1949
BT_DBG("%s bad parameters", hdev->name);
1950
return;
1951
}
1952
1953
tasklet_disable(&hdev->tx_task);
1954
1955
for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
1956
struct hci_conn *conn;
1957
__u16 handle, count;
1958
1959
handle = get_unaligned_le16(ptr++);
1960
count = get_unaligned_le16(ptr++);
1961
1962
conn = hci_conn_hash_lookup_handle(hdev, handle);
1963
if (conn) {
1964
conn->sent -= count;
1965
1966
if (conn->type == ACL_LINK) {
1967
hdev->acl_cnt += count;
1968
if (hdev->acl_cnt > hdev->acl_pkts)
1969
hdev->acl_cnt = hdev->acl_pkts;
1970
} else if (conn->type == LE_LINK) {
1971
if (hdev->le_pkts) {
1972
hdev->le_cnt += count;
1973
if (hdev->le_cnt > hdev->le_pkts)
1974
hdev->le_cnt = hdev->le_pkts;
1975
} else {
1976
hdev->acl_cnt += count;
1977
if (hdev->acl_cnt > hdev->acl_pkts)
1978
hdev->acl_cnt = hdev->acl_pkts;
1979
}
1980
} else {
1981
hdev->sco_cnt += count;
1982
if (hdev->sco_cnt > hdev->sco_pkts)
1983
hdev->sco_cnt = hdev->sco_pkts;
1984
}
1985
}
1986
}
1987
1988
tasklet_schedule(&hdev->tx_task);
1989
1990
tasklet_enable(&hdev->tx_task);
1991
}
1992
1993
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1994
{
1995
struct hci_ev_mode_change *ev = (void *) skb->data;
1996
struct hci_conn *conn;
1997
1998
BT_DBG("%s status %d", hdev->name, ev->status);
1999
2000
hci_dev_lock(hdev);
2001
2002
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2003
if (conn) {
2004
conn->mode = ev->mode;
2005
conn->interval = __le16_to_cpu(ev->interval);
2006
2007
if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2008
if (conn->mode == HCI_CM_ACTIVE)
2009
conn->power_save = 1;
2010
else
2011
conn->power_save = 0;
2012
}
2013
2014
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2015
hci_sco_setup(conn, ev->status);
2016
}
2017
2018
hci_dev_unlock(hdev);
2019
}
2020
2021
static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2022
{
2023
struct hci_ev_pin_code_req *ev = (void *) skb->data;
2024
struct hci_conn *conn;
2025
2026
BT_DBG("%s", hdev->name);
2027
2028
hci_dev_lock(hdev);
2029
2030
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2031
if (conn && conn->state == BT_CONNECTED) {
2032
hci_conn_hold(conn);
2033
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2034
hci_conn_put(conn);
2035
}
2036
2037
if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2038
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2039
sizeof(ev->bdaddr), &ev->bdaddr);
2040
else if (test_bit(HCI_MGMT, &hdev->flags)) {
2041
u8 secure;
2042
2043
if (conn->pending_sec_level == BT_SECURITY_HIGH)
2044
secure = 1;
2045
else
2046
secure = 0;
2047
2048
mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2049
}
2050
2051
hci_dev_unlock(hdev);
2052
}
2053
2054
static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2055
{
2056
struct hci_ev_link_key_req *ev = (void *) skb->data;
2057
struct hci_cp_link_key_reply cp;
2058
struct hci_conn *conn;
2059
struct link_key *key;
2060
2061
BT_DBG("%s", hdev->name);
2062
2063
if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2064
return;
2065
2066
hci_dev_lock(hdev);
2067
2068
key = hci_find_link_key(hdev, &ev->bdaddr);
2069
if (!key) {
2070
BT_DBG("%s link key not found for %s", hdev->name,
2071
batostr(&ev->bdaddr));
2072
goto not_found;
2073
}
2074
2075
BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2076
batostr(&ev->bdaddr));
2077
2078
if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2079
key->type == HCI_LK_DEBUG_COMBINATION) {
2080
BT_DBG("%s ignoring debug key", hdev->name);
2081
goto not_found;
2082
}
2083
2084
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2085
if (conn) {
2086
if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2087
conn->auth_type != 0xff &&
2088
(conn->auth_type & 0x01)) {
2089
BT_DBG("%s ignoring unauthenticated key", hdev->name);
2090
goto not_found;
2091
}
2092
2093
if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2094
conn->pending_sec_level == BT_SECURITY_HIGH) {
2095
BT_DBG("%s ignoring key unauthenticated for high \
2096
security", hdev->name);
2097
goto not_found;
2098
}
2099
2100
conn->key_type = key->type;
2101
conn->pin_length = key->pin_len;
2102
}
2103
2104
bacpy(&cp.bdaddr, &ev->bdaddr);
2105
memcpy(cp.link_key, key->val, 16);
2106
2107
hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2108
2109
hci_dev_unlock(hdev);
2110
2111
return;
2112
2113
not_found:
2114
hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2115
hci_dev_unlock(hdev);
2116
}
2117
2118
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
						ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

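/* Inquiry Result with RSSI event: responses come in one of two layouts
 * (with or without a pscan_mode field), distinguished here by the
 * per-response size. Each response updates the inquiry cache and is
 * reported to the management interface.
 */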
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	}

	hci_dev_unlock(hdev);
}

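/* Read Remote Extended Features completion: feature page 0x01 carries
 * the host-supported features, from which the peer's SSP support is
 * noted. For connections still in BT_CONFIG this also kicks off the
 * remote name request and, if no authentication is needed, completes
 * connection setup.
 */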
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

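/* Synchronous Connection Complete event: attach the new SCO/eSCO
 * handle on success. For a handful of failure codes the setup is
 * retried once with an adjusted packet type before the connection is
 * finally closed.
 */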
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}

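/* Extended Inquiry Result event: like the RSSI variant, but each
 * response also carries EIR data, which is passed to the management
 * interface together with the device class and RSSI.
 */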
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
						info->rssi, info->data);
	}

	hci_dev_unlock(hdev);
}

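/* Derive the authentication requirements to use in an IO Capability
 * Reply, based on what the remote side asked for and on whether both
 * sides are able to provide MITM protection.
 */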
static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}

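/* IO Capability Request event: reply with our IO capability, OOB data
 * availability and the computed authentication requirements, or reject
 * with "pairing not allowed" when the adapter is not pairable and the
 * remote side wants bonding.
 */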
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
						sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x18; /* Pairing not allowed */

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
						sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_oob = ev->oob_data;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}

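/* User Confirmation Request event (Secure Simple Pairing numeric
 * comparison): reject when we require MITM protection but the remote
 * side cannot provide it; when neither side needs MITM and we are the
 * initiator, auto-accept (optionally after a delay); otherwise forward
 * the request to user space for confirmation.
 */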
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the
	 * MITM bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
				sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
			(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiator, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
				sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
							confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. As initiator, a traditional
	 * auth_complete event is always produced and is also mapped to
	 * the mgmt_auth_failed event. */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & 0x01);

	hci_dev_unlock(hdev);
}

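/* Remote OOB Data Request event: answer with the stored hash and
 * randomizer for this address if we have them, otherwise send a
 * negative reply.
 */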
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

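/* LE Connection Complete event: create or look up the LE connection
 * object, report success or failure to the management interface and
 * confirm the connection towards the upper protocol layers.
 */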
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev->id, &ev->bdaddr);

	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	default:
		break;
	}
}

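/* Main HCI event dispatcher: strip the event header and route the
 * packet to the matching handler above, then free the skb and bump
 * the event counter.
 */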
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}

/* Generate internal stack event */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}