GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <[email protected]>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

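/* Called by the HCI event handler when a command completes; records
 * the result and wakes up any task waiting in __hci_request(). */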
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

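/* Controller initialization: flush any driver-specific commands,
 * reset the controller (unless quirked), read its basic parameters
 * and apply the default event filter and timeout settings. */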
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

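/* Inquiry ioctl helper: start a new inquiry if the cache is stale,
 * empty or a flush was requested, then copy the cached results back
 * to user space. */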
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

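/* Power on a controller: call the driver's open hook and, unless the
 * device is marked raw, run the HCI init sequence before declaring
 * the interface up. */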
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

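/* Common shutdown path used by hci_dev_close(), rfkill and device
 * unregistration: kill the tasklets, flush caches and queues, reset
 * the controller and call the driver's close hook. */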
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

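/* Handle the HCISET* ioctls that change a single device setting. */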
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

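/* Decide whether a link key should be stored permanently or dropped
 * when the connection goes away, based on the key type and both
 * sides' authentication requirements. */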
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
				u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

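/* Feed up to 'count' bytes of a possibly fragmented HCI packet into
 * the reassembly buffer selected by 'index'. Once a complete frame
 * has been collected it is handed to hci_recv_frame(). Returns the
 * number of unconsumed bytes, or a negative error. */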
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
					index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
						STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

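/* Queue ACL data for a connection; an skb carrying a frag_list is
 * split into individually headered fragments (ACL_START followed by
 * ACL_CONT) that are queued atomically. */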
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
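/* Pick the connection of the given type with the fewest packets in
 * flight and compute its fair share of the free controller buffers. */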
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

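/* RX tasklet: drain rx_q, mirror packets to promiscuous sockets and
 * dispatch events and data packets to the event handler and the
 * registered upper protocols. */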
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

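/* CMD tasklet: if the controller has command credit (cmd_cnt), send
 * the next queued command, keeping a clone in sent_cmd for reply
 * matching, and arm the command timeout timer. */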
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}